code
stringlengths
17
6.64M
def contrast(img, factor, **__):
    # Adjust image contrast; extra kwargs (fillcolor/resample) are ignored.
    enhancer = ImageEnhance.Contrast(img)
    return enhancer.enhance(factor)
def color(img, factor, **__):
    # Adjust image color balance; extra kwargs (fillcolor/resample) are ignored.
    enhancer = ImageEnhance.Color(img)
    return enhancer.enhance(factor)
def brightness(img, factor, **__):
    # Adjust image brightness; extra kwargs (fillcolor/resample) are ignored.
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)
def sharpness(img, factor, **__):
    # Adjust image sharpness; extra kwargs (fillcolor/resample) are ignored.
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
def _randomly_negate(v): 'With 50% prob, negate the value' return ((- v) if (random.random() > 0.5) else v)
def _rotate_level_to_arg(level, _hparams):
    # Map level in [0, _MAX_LEVEL] to a rotation angle in [-30, 30] degrees.
    angle = level / _MAX_LEVEL * 30.0
    return (_randomly_negate(angle),)
def _enhance_level_to_arg(level, _hparams):
    # Enhancement factor in [0.1, 1.9]; 1.0 means "no change".
    factor = level / _MAX_LEVEL * 1.8 + 0.1
    return (factor,)
def _enhance_increasing_level_to_arg(level, _hparams):
    # 'Increasing' variant: factor is 1.0 +/- up to 0.9, stronger with level.
    delta = level / _MAX_LEVEL * 0.9
    return (1.0 + _randomly_negate(delta),)
def _shear_level_to_arg(level, _hparams):
    # Shear magnitude in [-0.3, 0.3], sign chosen at random.
    amount = level / _MAX_LEVEL * 0.3
    return (_randomly_negate(amount),)
def _translate_abs_level_to_arg(level, hparams):
    # Absolute translation in pixels, scaled by hparams['translate_const'].
    max_px = float(hparams['translate_const'])
    offset = level / _MAX_LEVEL * max_px
    return (_randomly_negate(offset),)
def _translate_rel_level_to_arg(level, hparams):
    # Relative translation as a fraction of image size (default max 0.45).
    max_pct = hparams.get('translate_pct', 0.45)
    offset = level / _MAX_LEVEL * max_pct
    return (_randomly_negate(offset),)
def _posterize_level_to_arg(level, _hparams):
    # 0..4 posterize bits; severity *decreases* as level grows in this variant.
    bits = int(level / _MAX_LEVEL * 4)
    return (bits,)
def _posterize_increasing_level_to_arg(level, hparams):
    # 'Increasing' variant: fewer bits kept as level grows (severity increases).
    (bits,) = _posterize_level_to_arg(level, hparams)
    return (4 - bits,)
def _posterize_original_level_to_arg(level, _hparams):
    # AutoAugment-paper range: keep 4..8 bits; severity decreases with level.
    bits = int(level / _MAX_LEVEL * 4) + 4
    return (bits,)
def _solarize_level_to_arg(level, _hparams):
    # Solarize threshold 0..256; severity decreases as level grows here.
    thresh = int(level / _MAX_LEVEL * 256)
    return (thresh,)
def _solarize_increasing_level_to_arg(level, _hparams):
    # 'Increasing' variant: threshold shrinks (severity grows) with level.
    (thresh,) = _solarize_level_to_arg(level, _hparams)
    return (256 - thresh,)
def _solarize_add_level_to_arg(level, _hparams):
    # Additive amount 0..110 for the solarize_add op.
    amount = int(level / _MAX_LEVEL * 110)
    return (amount,)
class AugmentOp():
    """
    A single RandAugment operation, applicable to one PIL image or a list of
    frames (video): the op and its sampled arguments are shared across frames.
    """

    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        # Resolve the op callable and its level->arg mapping from module tables.
        hparams = (hparams or _HPARAMS_DEFAULT)
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # Geometric PIL ops use fill colour / resample kwargs; enhance-style
        # ops swallow them via their **__ catch-all.
        self.kwargs = {'fillcolor': (hparams['img_mean'] if ('img_mean' in hparams) else _FILL), 'resample': (hparams['interpolation'] if ('interpolation' in hparams) else _RANDOM_INTERPOLATION)}
        # Optional gaussian jitter of the magnitude (0 disables it).
        self.magnitude_std = self.hparams.get('magnitude_std', 0)

    def __call__(self, img_list):
        # Skip the op entirely with probability (1 - prob).
        if ((self.prob < 1.0) and (random.random() > self.prob)):
            return img_list
        magnitude = self.magnitude
        if (self.magnitude_std and (self.magnitude_std > 0)):
            magnitude = random.gauss(magnitude, self.magnitude_std)
        # Clamp the jittered magnitude back into the valid [0, _MAX_LEVEL] range.
        magnitude = min(_MAX_LEVEL, max(0, magnitude))
        level_args = (self.level_fn(magnitude, self.hparams) if (self.level_fn is not None) else ())
        # For video, apply the same op with the same sampled args to every frame.
        if isinstance(img_list, list):
            return [self.aug_fn(img, *level_args, **self.kwargs) for img in img_list]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized per-op sampling probabilities for the given transforms.

    Only one predefined weight set (index 0) exists.
    """
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0
    rand_weights = _RAND_CHOICE_WEIGHTS_0
    probs = np.array([rand_weights[name] for name in transforms], dtype=np.float64)
    return probs / probs.sum()
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Build the list of AugmentOp instances used by RandAugment."""
    hparams = hparams or _HPARAMS_DEFAULT
    transforms = transforms or _RAND_TRANSFORMS
    ops = []
    for name in transforms:
        ops.append(AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
class RandAugment():
    """Apply `num_layers` randomly chosen ops from `ops` to an image (or list of frames)."""

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        # With explicit weights sampling is without replacement; uniform
        # sampling (weights None) allows repeats — matching np.random.choice.
        without_replacement = self.choice_weights is not None
        chosen = np.random.choice(self.ops, self.num_layers,
                                  replace=not without_replacement,
                                  p=self.choice_weights)
        for op in chosen:
            img = op(img)
        return img
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719

    Create a RandAugment transform.

    :param config_str: String defining configuration of random augmentation. Consists of multiple
        sections separated by dashes ('-'). The first section defines the specific variant of rand
        augment (currently only 'rand'). The remaining sections, not order specific, determine
        'm' - integer magnitude of rand augment
        'n' - integer num layers (number of transform ops selected per image)
        'w' - integer probability weight index (index of a set of weights to influence choice of op)
        'mstd' - float std deviation of magnitude noise applied
        'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
        Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
        'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: A PyTorch compatible Transform
    :raises AssertionError: if the config string has an unknown section key.
    """
    magnitude = _MAX_LEVEL  # default to _MAX_LEVEL for magnitude (currently 10)
    num_layers = 2
    weight_idx = None  # None -> uniform op sampling
    transforms = _RAND_TRANSFORMS
    config = config_str.split('-')
    assert config[0] == 'rand'
    for c in config[1:]:
        # Split into key prefix and numeric-ish value, e.g. 'mstd0.5' -> ('mstd', '0.5').
        cs = re.split(r'(\d.*)', c)
        if len(cs) < 2:
            continue
        key, val = cs[:2]
        if key == 'mstd':
            hparams.setdefault('magnitude_std', float(val))
        elif key == 'inc':
            # BUGFIX: was `bool(val)`, which is True for ANY non-empty captured
            # string, so 'inc0' wrongly enabled the increasing transforms.
            if int(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif key == 'm':
            magnitude = int(val)
        elif key == 'n':
            num_layers = int(val)
        elif key == 'w':
            weight_idx = int(val)
        else:
            # BUGFIX: was `assert NotImplementedError`, which always passes
            # (a class object is truthy) and silently ignored unknown keys.
            assert False, f'Unknown RandAugment config section: {key}'
    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
    choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx)
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
def get_args():
    """Build and parse the command-line arguments for the MVD pre-training script."""
    parser = argparse.ArgumentParser('MVD pre-training script', add_help=False)
    # --- training schedule / bookkeeping ---
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--epochs', default=800, type=int)
    parser.add_argument('--save_ckpt_freq', default=50, type=int)
    parser.add_argument('--update_freq', default=1, type=int)
    # --- student model ---
    parser.add_argument('--model', default='pretrain_masked_video_student_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train')
    parser.add_argument('--decoder_depth', default=2, type=int, help='depth of decoder')
    # --- image teacher (feature-distillation source) ---
    parser.add_argument('--image_teacher_model', default='mae_teacher_vit_base_patch16', type=str, metavar='MODEL', help='Name of teacher model')
    parser.add_argument('--image_teacher_model_ckpt_path', default='', type=str)
    parser.add_argument('--distillation_target_dim', default=768, type=int)
    parser.add_argument('--distill_loss_func', default='SmoothL1', choices=['L1', 'L2', 'SmoothL1'], type=str)
    parser.add_argument('--image_teacher_loss_weight', default=1.0, type=float)
    # --- video teacher ---
    parser.add_argument('--video_teacher_model', default='pretrain_videomae_teacher_base_patch16_224', type=str, metavar='MODEL', help='Name of teacher model')
    parser.add_argument('--video_teacher_model_ckpt_path', default='', type=str)
    parser.add_argument('--video_distillation_target_dim', default=768, type=int)
    parser.add_argument('--video_distill_loss_func', default='SmoothL1', choices=['L1', 'L2', 'SmoothL1'], type=str)
    parser.add_argument('--video_teacher_loss_weight', default=1.0, type=float)
    parser.add_argument('--video_teacher_drop_path', default=0.0, type=float)
    parser.add_argument('--teacher_input_size', default=224, type=int, help='videos input size for backbone')
    parser.add_argument('--video_teacher_input_size', default=224, type=int, help='videos input size for backbone')
    parser.add_argument('--feat_decoder_embed_dim', default=None, type=int)
    parser.add_argument('--feat_decoder_num_heads', default=None, type=int)
    parser.add_argument('--norm_feature', action='store_true', default=False)
    # --- masking / student input ---
    parser.add_argument('--tubelet_size', default=2, type=int)
    parser.add_argument('--mask_type', default='tube', choices=['random', 'tube'], type=str, help='masked strategy of video tokens/patches')
    parser.add_argument('--mask_ratio', default=0.75, type=float, help='ratio of the visual tokens/patches need be masked')
    parser.add_argument('--input_size', default=224, type=int, help='videos input size for backbone')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
    parser.add_argument('--use_checkpoint', action='store_true', default=False)
    # --- optimizer ---
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"')
    parser.add_argument('--opt_eps', default=1e-08, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    parser.add_argument('--weight_decay_end', type=float, default=None, help='Final value of the\n weight decay. We use a cosine schedule for WD. \n (Set the same value with args.weight_decay to keep weight decay no change)')
    # --- checkpoint key handling ---
    parser.add_argument('--model_key', default='model|module', type=str)
    parser.add_argument('--model_prefix', default='', type=str)
    # --- learning-rate schedule ---
    parser.add_argument('--lr', type=float, default=0.00015, metavar='LR', help='learning rate (default: 1.5e-4)')
    parser.add_argument('--warmup_lr', type=float, default=1e-06, metavar='LR', help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min_lr', type=float, default=1e-05, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N', help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--warmup_steps', type=int, default=(- 1), metavar='N', help='epochs to warmup LR, if scheduler supports')
    # --- data augmentation ---
    parser.add_argument('--color_jitter', type=float, default=0.6, metavar='PCT', help='Color jitter factor (default: 0.6)')
    parser.add_argument('--color_jitter_hue', type=float, default=0.15, metavar='PCT', help='Color jitter Hue factor (default: 0.15)')
    parser.add_argument('--gray_scale', type=float, default=0.2, metavar='PCT', help='Gray scale factor (default: 0.2)')
    parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--num_sample', type=int, default=1, help='Repeated_aug (default: 1)')
    # --- dataset ---
    parser.add_argument('--data_root', default=None, type=str, help='dataset path root')
    parser.add_argument('--data_path', default='/path/to/list_kinetics-400', type=str, help='path of dataset file list')
    parser.add_argument('--imagenet_default_mean_and_std', default=True, action='store_true')
    parser.add_argument('--num_frames', type=int, default=16)
    parser.add_argument('--sampling_rate', type=int, default=4)
    # --- output / resume / runtime ---
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None, help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--auto_resume', action='store_true')
    parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
    parser.set_defaults(auto_resume=True)
    parser.add_argument('--load_model', default=None, help='init from checkpoint')
    parser.add_argument('--use_cls_token', action='store_true', default=False)
    parser.add_argument('--time_stride_loss', action='store_true', default=True, help='predict one frame per temporal stride if true')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem', help='')
    parser.set_defaults(pin_mem=True)
    # --- distributed training ---
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=(- 1), type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser.parse_args()
def get_image_teacher_model(args):
    """Instantiate the (randomly initialized) image teacher network named in args."""
    print(f'Creating teacher model: {args.image_teacher_model}')
    return create_model(args.image_teacher_model, pretrained=False,
                        img_size=args.teacher_input_size)
def get_video_teacher_model(args):
    """Instantiate the (randomly initialized) video teacher network named in args."""
    print(f'Creating teacher model: {args.video_teacher_model}')
    return create_model(args.video_teacher_model, pretrained=False,
                        img_size=args.video_teacher_input_size,
                        drop_path_rate=args.video_teacher_drop_path)
def get_model(args):
    """Instantiate the MVD student model from the command-line args."""
    print(f'Creating model: {args.model}')
    return create_model(
        args.model,
        pretrained=False,
        drop_path_rate=args.drop_path,
        drop_block_rate=None,
        decoder_depth=args.decoder_depth,
        use_cls_token=args.use_cls_token,
        num_frames=args.num_frames,
        target_feature_dim=args.distillation_target_dim,
        target_video_feature_dim=args.video_distillation_target_dim,
        feat_decoder_embed_dim=args.feat_decoder_embed_dim,
        feat_decoder_num_heads=args.feat_decoder_num_heads,
        use_checkpoint=args.use_checkpoint,
        tubelet_size=args.tubelet_size,
    )
def main(args):
    """Entry point: build the student model, both teachers and the data
    pipeline, then run the MVD pre-training loop."""
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    # Seed per-rank so each process gets a different but reproducible stream.
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    model = get_model(args)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    patch_size = model.patch_size
    print(('Patch size = %s' % str(patch_size)))
    # Token grid (temporal, height, width) used by the mask generator.
    args.window_size = ((args.num_frames // args.tubelet_size), (args.input_size // patch_size[0]), (args.input_size // patch_size[1]))
    args.patch_size = patch_size
    dataset_train = build_distillation_dataset(args)
    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    sampler_rank = global_rank
    sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    print(('Sampler_train = %s' % str(sampler_train)))
    # Tensorboard logging only on the master rank.
    if ((global_rank == 0) and (args.log_dir is not None)):
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None
    # Repeated augmentation needs a collate that flattens the extra samples.
    if (args.num_sample > 1):
        collate_func = partial(multiple_pretrain_samples_collate, fold=False)
    else:
        collate_func = None
    data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, worker_init_fn=utils.seed_worker, collate_fn=collate_func)
    model.to(device)
    model_without_ddp = model
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    # ---- image teacher: create, then optionally load its checkpoint ----
    image_teacher_model = get_image_teacher_model(args)
    if args.image_teacher_model_ckpt_path:
        if args.image_teacher_model_ckpt_path.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(args.image_teacher_model_ckpt_path, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.image_teacher_model_ckpt_path, map_location='cpu')
        print(('Load teacher ckpt from %s' % args.image_teacher_model_ckpt_path))
        checkpoint_model = None
        # Checkpoints may nest the weights under 'model' or 'module' (--model_key).
        for model_key in args.model_key.split('|'):
            if (model_key in checkpoint):
                checkpoint_model = checkpoint[model_key]
                print(('Load state_dict by model_key = %s' % model_key))
                break
        if (checkpoint_model is None):
            checkpoint_model = checkpoint
        # The classification head is not distilled; drop it if present.
        for k in ['head.weight', 'head.bias']:
            if (k in checkpoint_model):
                print(f'Removing key {k} from pretrained checkpoint')
                del checkpoint_model[k]
        all_keys = list(checkpoint_model.keys())
        new_dict = OrderedDict()
        for key in all_keys:
            if key.startswith('backbone.'):
                # Strip the 'backbone.' prefix (9 characters).
                new_dict[key[9:]] = checkpoint_model[key]
            elif ('pos_embed' in key):
                # Positional embeddings are rebuilt for the target input size.
                continue
            else:
                new_dict[key] = checkpoint_model[key]
        checkpoint_model = new_dict
        utils.load_state_dict(image_teacher_model, checkpoint_model, prefix=args.model_prefix)
    image_teacher_model.to(device)
    # ---- video teacher: same checkpoint-loading procedure ----
    video_teacher_model = get_video_teacher_model(args)
    if args.video_teacher_model_ckpt_path:
        if args.video_teacher_model_ckpt_path.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(args.video_teacher_model_ckpt_path, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.video_teacher_model_ckpt_path, map_location='cpu')
        print(('Load video teacher ckpt from %s' % args.video_teacher_model_ckpt_path))
        checkpoint_model = None
        for model_key in args.model_key.split('|'):
            if (model_key in checkpoint):
                checkpoint_model = checkpoint[model_key]
                print(('Load video state_dict by model_key = %s' % model_key))
                break
        if (checkpoint_model is None):
            checkpoint_model = checkpoint
        for k in ['head.weight', 'head.bias']:
            if (k in checkpoint_model):
                print(f'Removing key {k} from pretrained checkpoint')
                del checkpoint_model[k]
        all_keys = list(checkpoint_model.keys())
        new_dict = OrderedDict()
        for key in all_keys:
            if key.startswith('backbone.'):
                new_dict[key[9:]] = checkpoint_model[key]
            elif ('pos_embed' in key):
                continue
            else:
                new_dict[key] = checkpoint_model[key]
        checkpoint_model = new_dict
        utils.load_state_dict(video_teacher_model, checkpoint_model, prefix=args.model_prefix)
    video_teacher_model.to(device)
    print(('Model = %s' % str(model_without_ddp)))
    print('number of params: {} M'.format((n_parameters / 1000000.0)))
    # Scale the learning rates linearly with the effective global batch size (base 256).
    total_batch_size = (((args.batch_size * args.num_sample) * args.update_freq) * utils.get_world_size())
    num_training_steps_per_epoch = (len(dataset_train) // int((total_batch_size / args.num_sample)))
    args.lr = ((args.lr * total_batch_size) / 256)
    args.min_lr = ((args.min_lr * total_batch_size) / 256)
    args.warmup_lr = ((args.warmup_lr * total_batch_size) / 256)
    print(('LR = %.8f' % args.lr))
    print(('Batch size = %d' % total_batch_size))
    print(('Update frequent = %d' % args.update_freq))
    print(('Number of training steps = %d' % num_training_steps_per_epoch))
    print(('Number of training examples per epoch = %d' % (total_batch_size * num_training_steps_per_epoch)))
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
        model_without_ddp = model.module
    optimizer = create_optimizer(args, model_without_ddp)
    loss_scaler = NativeScaler()
    print('Use step level LR & WD scheduler!')
    # Per-iteration cosine schedules for LR and weight decay.
    lr_schedule_values = utils.cosine_scheduler(args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps)
    if (args.weight_decay_end is None):
        args.weight_decay_end = args.weight_decay
    wd_schedule_values = utils.cosine_scheduler(args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
    print(('Max WD = %.7f, Min WD = %.7f' % (max(wd_schedule_values), min(wd_schedule_values))))
    # Record the full run configuration once (main process only).
    if (args.output_dir and utils.is_main_process()):
        with open(os.path.join(args.output_dir, 'config.txt'), mode='a', encoding='utf-8') as f:
            for arg in vars(args):
                f.write((((format(arg, '<20') + ' ') + format(str(getattr(args, arg)), '<')) + '\n'))
    utils.auto_load_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=None)
    torch.cuda.empty_cache()
    print(f'Start training for {args.epochs} epochs')
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the sampler differently each epoch.
            data_loader_train.sampler.set_epoch(epoch)
        if (log_writer is not None):
            log_writer.set_step(((epoch * num_training_steps_per_epoch) * args.update_freq))
        train_stats = train_one_epoch(args, model, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, log_writer=log_writer, start_steps=(epoch * num_training_steps_per_epoch), lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values, update_freq=args.update_freq, time_stride_loss=True, image_teacher_model=image_teacher_model, video_teacher_model=video_teacher_model, norm_feature=args.norm_feature)
        # Save every save_ckpt_freq epochs and always at the final epoch.
        if args.output_dir:
            if ((((epoch + 1) % args.save_ckpt_freq) == 0) or ((epoch + 1) == args.epochs)):
                utils.save_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, model_ema=None)
        log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
        if (args.output_dir and utils.is_main_process()):
            if (log_writer is not None):
                log_writer.flush()
            with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                f.write((json.dumps(log_stats) + '\n'))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average."""

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt if fmt is not None else '{median:.4f} ({global_avg:.4f})'

    def update(self, value, n=1):
        # The window sees `value` once; global totals are weighted by n.
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """All-reduce count/total across ranks.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(median=self.median, avg=self.avg,
                               global_avg=self.global_avg, max=self.max,
                               value=self.value)
class MetricLogger(object):
    """Collect named SmoothedValue meters and pretty-print / time them during a
    training loop (via log_every)."""

    def __init__(self, delimiter='\t'):
        # Unknown meter names get a default SmoothedValue automatically.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for (k, v) in kwargs.items():
            if (v is None):
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Allow `logger.loss`-style access to meters; fall back to normal attrs.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing ETA / meter / timing stats
        every `print_freq` iterations and on the final one."""
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of len(iterable).
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            # data_time: waiting for the batch; iter_time: full step incl. caller work.
            data_time.update((time.time() - end))
            (yield obj)
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
        self.update(total_time=total_time)
class TensorboardLogger(object):
    """Thin wrapper around SummaryWriter that tracks a global step counter."""

    def __init__(self, log_dir):
        self.writer = SummaryWriter(logdir=log_dir)
        self.step = 0

    def set_step(self, step=None):
        # An explicit step overrides; otherwise advance the counter by one.
        if step is None:
            self.step += 1
        else:
            self.step = step

    def update(self, head='scalar', step=None, **kwargs):
        for key, val in kwargs.items():
            if val is None:
                continue
            if isinstance(val, torch.Tensor):
                val = val.item()
            assert isinstance(val, (float, int))
            tag = head + '/' + key
            self.writer.add_scalar(tag, val, step if step is not None else self.step)

    def flush(self):
        self.writer.flush()
def seed_worker(worker_id):
    """DataLoader worker_init_fn: derive numpy/random seeds from torch's
    per-worker initial seed so augmentation RNG differs across workers."""
    base_seed = torch.initial_seed() % (2 ** 32)
    np.random.seed(base_seed)
    random.seed(base_seed)
def _load_checkpoint_for_ema(model_ema, checkpoint): '\n Workaround for ModelEma._load_checkpoint to accept an already-loaded object\n ' mem_file = io.BytesIO() temp = {} temp['state_dict_ema'] = checkpoint torch.save(temp, mem_file) mem_file.seek(0) model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
    """
    Disable printing on non-master processes by monkeypatching builtins.print;
    individual calls can still opt in with print(..., force=True).
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of distributed processes (1 when not distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process (0 when not distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True on rank 0 (and when running without distributed training)."""
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    """torch.save that is a no-op on non-master ranks."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Detect the launch environment (OpenMPI/ITP, SLURM, or torchrun-style env
    vars), fill in args.rank/world_size/gpu, and initialize the NCCL process
    group. Falls back to non-distributed mode when nothing is detected."""
    if args.dist_on_itp:
        # OpenMPI launcher: translate OMPI_* vars into the standard env vars.
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']))
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
    elif ('SLURM_PROCID' in os.environ):
        # SLURM launcher: derive rank/world size from SLURM vars and resolve the
        # master address from the first host in the node list.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = int(os.environ['SLURM_LOCALID'])
        args.world_size = int(os.environ['SLURM_NTASKS'])
        os.environ['RANK'] = str(args.rank)
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['WORLD_SIZE'] = str(args.world_size)
        node_list = os.environ['SLURM_NODELIST']
        addr = subprocess.getoutput(f'scontrol show hostname {node_list} | head -n1')
        if ('MASTER_ADDR' not in os.environ):
            os.environ['MASTER_ADDR'] = addr
    elif (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        # torchrun / torch.distributed.launch style environment.
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(args.rank, args.dist_url, args.gpu), flush=True)
    # Generous 8-hour timeout: large pre-training jobs can stall a long time at startup.
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank, timeout=datetime.timedelta(seconds=28800))
    torch.distributed.barrier()
    # Silence print() on all non-zero ranks.
    setup_for_distributed((args.rank == 0))
def load_state_dict(model, state_dict, prefix='', ignore_missing='relative_position_index'):
    """Load `state_dict` into `model` non-strictly, mimicking
    torch.nn.Module.load_state_dict but collecting (rather than raising on)
    missing/unexpected keys, then printing a categorized report.

    `ignore_missing` is a '|'-separated list of substrings; missing keys
    containing any of them are reported as ignored instead of warned about.
    """
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    # Preserve state_dict metadata (version info) for _load_from_state_dict.
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if (metadata is not None):
        state_dict._metadata = metadata

    def load(module, prefix=''):
        # Recursively mirror Module.load_state_dict's traversal, calling the
        # private _load_from_state_dict hook on each submodule.
        local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
        module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
        for (name, child) in module._modules.items():
            if (child is not None):
                load(child, ((prefix + name) + '.'))
    load(model, prefix=prefix)
    # Partition the missing keys into warn-worthy vs. deliberately ignored.
    warn_missing_keys = []
    ignore_missing_keys = []
    for key in missing_keys:
        keep_flag = True
        for ignore_key in ignore_missing.split('|'):
            if (ignore_key in key):
                keep_flag = False
                break
        if keep_flag:
            warn_missing_keys.append(key)
        else:
            ignore_missing_keys.append(key)
    missing_keys = warn_missing_keys
    if (len(missing_keys) > 0):
        print('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys))
    if (len(unexpected_keys) > 0):
        print('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys))
    if (len(ignore_missing_keys) > 0):
        print('Ignored weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, ignore_missing_keys))
    if (len(error_msgs) > 0):
        print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount():
    """Wrapper around torch.cuda.amp.GradScaler that also returns the gradient
    norm from each real optimizer step (None on accumulation-only steps)."""
    state_dict_key = 'amp_scaler'

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        # Scale the loss before backward so small fp16 gradients don't underflow.
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if update_grad:
            if (clip_grad is not None):
                assert (parameters is not None)
                # Gradients must be unscaled in-place before clipping/measuring.
                self._scaler.unscale_(optimizer)
                norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
            else:
                self._scaler.unscale_(optimizer)
                norm = get_grad_norm_(parameters)
            # step() skips the update if any grad is inf/nan; update() adapts the scale.
            self._scaler.step(optimizer)
            self._scaler.update()
        else:
            # Gradient-accumulation step: no optimizer update, no norm to report.
            norm = None
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Total `norm_type`-norm of the gradients of `parameters`.

    Parameters without a gradient are skipped; returns tensor(0.) when none
    have gradients. Supports norm_type=inf (max-abs norm).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if not grads:
        return torch.tensor(0.0)
    device = grads[0].device
    if norm_type == inf:
        return max(g.detach().abs().max().to(device) for g in grads)
    per_param = [torch.norm(g.detach(), norm_type).to(device) for g in grads]
    return torch.norm(torch.stack(per_param), norm_type)
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0, warmup_steps=(- 1)):
    """Per-iteration cosine-decay schedule with an optional linear warmup.

    Returns an ndarray of length epochs * niter_per_ep: a linear ramp from
    start_warmup_value to base_value over the warmup iterations, followed by a
    cosine decay from base_value to final_value.

    Warmup length is warmup_epochs * niter_per_ep unless warmup_steps > 0,
    which overrides it with an explicit iteration count.
    """
    warmup_schedule = np.array([])
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_steps > 0:
        warmup_iters = warmup_steps
    print('Set warmup steps = %d' % warmup_iters)
    # BUGFIX: this was gated on `warmup_epochs > 0`, so passing an explicit
    # warmup_steps with warmup_epochs == 0 left the ramp empty and tripped the
    # length assert below. Gate on the effective warmup length instead.
    if warmup_iters > 0:
        warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
    iters = np.arange(epochs * niter_per_ep - warmup_iters)
    schedule = np.array([
        final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / len(iters)))
        for i in iters
    ])
    schedule = np.concatenate((warmup_schedule, schedule))
    assert len(schedule) == epochs * niter_per_ep
    return schedule
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Checkpoint the training state.

    With a native-AMP loss scaler the checkpoint is written via torch.save
    (master rank only); otherwise saving is delegated to the model engine's
    own save_checkpoint (deepspeed-style).
    """
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    if loss_scaler is None:
        # Engine-managed path: model/optimizer state is serialized by the engine.
        client_state = {'epoch': epoch}
        if model_ema is not None:
            client_state['model_ema'] = get_state_dict(model_ema)
        model.save_checkpoint(save_dir=args.output_dir, tag=('checkpoint-%s' % epoch_name), client_state=client_state)
        return
    for checkpoint_path in [(output_dir / ('checkpoint-%s.pth' % epoch_name))]:
        to_save = {
            'model': model_without_ddp.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch,
            'scaler': loss_scaler.state_dict(),
            'args': args,
        }
        if model_ema is not None:
            to_save['model_ema'] = get_state_dict(model_ema)
        save_on_master(to_save, checkpoint_path)
def remove_key_in_checkpoint(checkpoint, remove_key_list=None):
    """Drop every checkpoint entry whose key contains any of the patterns.

    :param checkpoint: state-dict-like mapping, mutated in place
    :param remove_key_list: list of substrings; any key containing one of
        them is deleted. If this is not a list, the checkpoint is returned
        untouched.
    :return: the (possibly mutated) checkpoint

    Fix: the original deleted a key once per matching pattern, raising
    KeyError when a key matched two patterns; ``any()`` deletes it at most
    once.
    """
    if isinstance(remove_key_list, list):
        # Snapshot the keys so deletion during iteration is safe.
        for k in list(checkpoint.keys()):
            if any(pattern in k for pattern in remove_key_list):
                del checkpoint[k]
    return checkpoint
def auto_find_start_epoch(args):
    """Return the epoch to resume from by scanning output_dir.

    Looks for ``checkpoint-<N>.pth`` files and returns the largest N found
    (or 0 when none exist, auto-resume is off, or an explicit resume path
    was given).
    """
    if not (args.auto_resume and len(args.resume) == 0):
        return 0
    import glob
    output_dir = Path(args.output_dir)
    latest = -1
    for ckpt in glob.glob(os.path.join(output_dir, 'checkpoint-*.pth')):
        # "checkpoint-<N>.pth" -> "<N>"
        suffix = ckpt.split('-')[-1].split('.')[0]
        if suffix.isdigit():
            latest = max(int(suffix), latest)
    return max(latest, 0)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Resume training state from the newest checkpoint in args.output_dir.

    Two resume paths:
      * loss_scaler is not None  -> plain torch checkpoints (.pth), loaded
        into model_without_ddp / optimizer / loss_scaler directly;
      * loss_scaler is None      -> DeepSpeed-style checkpoints restored via
        model.load_checkpoint, with bookkeeping read from client_states.

    Mutates args.resume and args.start_epoch as a side effect.
    """
    output_dir = Path(args.output_dir)
    if (loss_scaler is not None):
        # --- torch-checkpoint path ---
        if (args.auto_resume and (len(args.resume) == 0)):
            # Pick the checkpoint with the highest numeric epoch suffix.
            import glob
            all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
            latest_ckpt = (- 1)
            for ckpt in all_checkpoints:
                t = ckpt.split('-')[(- 1)].split('.')[0]
                if t.isdigit():
                    latest_ckpt = max(int(t), latest_ckpt)
            if (latest_ckpt >= 0):
                args.resume = os.path.join(output_dir, ('checkpoint-%d.pth' % latest_ckpt))
            print(('Auto resume checkpoint: %s' % args.resume))
        if args.resume:
            if args.resume.startswith('https'):
                checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location='cpu', check_hash=True)
            else:
                checkpoint = torch.load(args.resume, map_location='cpu')
            model_without_ddp.load_state_dict(checkpoint['model'])
            print(('Resume checkpoint %s' % args.resume))
            # Only restore optimizer/epoch state from full training
            # checkpoints (an int 'epoch' distinguishes them from
            # eval-only / released weights).
            if (('optimizer' in checkpoint) and ('epoch' in checkpoint) and (type(checkpoint['epoch']) is int)):
                optimizer.load_state_dict(checkpoint['optimizer'])
                args.start_epoch = (checkpoint['epoch'] + 1)
                if (hasattr(args, 'model_ema') and args.model_ema):
                    _load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
                if ('scaler' in checkpoint):
                    loss_scaler.load_state_dict(checkpoint['scaler'])
                print('With optim & sched!')
    elif (args.eval and args.resume_best):
        # --- DeepSpeed path, evaluation against the best checkpoint ---
        print('Auto resume checkpoint: best')
        (_, client_states) = model.load_checkpoint(args.output_dir, tag='checkpoint-best')
        args.start_epoch = 1
        if (model_ema is not None):
            if args.model_ema:
                _load_checkpoint_for_ema(model_ema, client_states['model_ema'])
    elif (args.auto_resume and (len(args.resume) == 0)):
        # --- DeepSpeed path, auto-resume from the newest tag directory ---
        import glob
        all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
        latest_ckpt = (- 1)
        for ckpt in all_checkpoints:
            t = ckpt.split('-')[(- 1)].split('.')[0]
            if t.isdigit():
                latest_ckpt = max(int(t), latest_ckpt)
        if (latest_ckpt >= 0):
            args.resume = os.path.join(output_dir, ('checkpoint-%d' % latest_ckpt))
            print(('Auto resume checkpoint: %d' % latest_ckpt))
            (_, client_states) = model.load_checkpoint(args.output_dir, tag=('checkpoint-%d' % latest_ckpt))
            args.start_epoch = (client_states['epoch'] + 1)
            if (model_ema is not None):
                if args.model_ema:
                    _load_checkpoint_for_ema(model_ema, client_states['model_ema'])
    else:
        # --- DeepSpeed path, explicit resume: args.resume holds an epoch
        # number here (not a path) -- NOTE(review): verify callers pass a
        # digit string in this branch.
        latest_ckpt = int(args.resume)
        print(('Auto resume checkpoint: %d' % latest_ckpt))
        (_, client_states) = model.load_checkpoint(args.output_dir, tag=('checkpoint-%d' % latest_ckpt))
        args.start_epoch = (client_states['epoch'] + 1)
        if (model_ema is not None):
            if args.model_ema:
                _load_checkpoint_for_ema(model_ema, client_states['model_ema'])
def create_ds_config(args):
    """Write a DeepSpeed JSON config derived from the run arguments.

    Stores the path in args.deepspeed_config as a side effect.
    """
    args.deepspeed_config = os.path.join(args.output_dir, 'deepspeed_config.json')
    # Global batch = per-GPU micro batch * gradient accumulation * world size.
    ds_config = {
        'train_batch_size': args.batch_size * args.update_freq * get_world_size(),
        'train_micro_batch_size_per_gpu': args.batch_size,
        'steps_per_print': 1000,
        'optimizer': {
            'type': 'Adam',
            'adam_w_mode': True,
            'params': {
                'lr': args.lr,
                'weight_decay': args.weight_decay,
                'bias_correction': True,
                'betas': [0.9, 0.999],
                'eps': 1e-08,
            },
        },
        'fp16': {
            'enabled': True,
            'loss_scale': 0,
            'initial_scale_power': 7,
            'loss_scale_window': 128,
        },
    }
    with open(args.deepspeed_config, mode='w') as writer:
        writer.write(json.dumps(ds_config, indent=2))
def multiple_samples_collate(batch, fold=False):
    """
    Collate function for repeated augmentation. Each instance in the batch has
    more than one sample.
    Args:
        batch (tuple or list): data batch to collate.
    Returns:
        (tuple): collated data batch.
    """
    inputs, labels, video_idx, extra_data = zip(*batch)
    # Flatten the per-instance sample lists before collating.
    flat_inputs = [sample for group in inputs for sample in group]
    flat_labels = [sample for group in labels for sample in group]
    flat_video_idx = [sample for group in video_idx for sample in group]
    collated_inputs = default_collate(flat_inputs)
    collated_labels = default_collate(flat_labels)
    collated_video_idx = default_collate(flat_video_idx)
    # extra_data carries one entry per instance and is not flattened.
    collated_extra = default_collate(extra_data)
    if fold:
        return ([collated_inputs], collated_labels, collated_video_idx, collated_extra)
    return (collated_inputs, collated_labels, collated_video_idx, collated_extra)
def multiple_pretrain_samples_collate(batch, fold=False):
    """
    Collate function for repeated augmentation. Each instance in the batch has
    more than one sample.
    Args:
        batch (tuple or list): data batch to collate.
    Returns:
        (tuple): collated data batch.
    """
    inputs_0, inputs_1, masks = zip(*batch)
    # Flatten the per-instance sample lists before collating.
    flat_0 = [sample for group in inputs_0 for sample in group]
    flat_1 = [sample for group in inputs_1 for sample in group]
    flat_masks = [sample for group in masks for sample in group]
    collated_0 = default_collate(flat_0)
    collated_1 = default_collate(flat_1)
    collated_masks = default_collate(flat_masks)
    if fold:
        return ([collated_0], [collated_1], collated_masks)
    return (collated_0, collated_1, collated_masks)
class Config(object):
    """Base configuration class. For custom configurations, create a
    sub-class that inherits from this one and override properties
    that need to be changed.
    """
    # Recognizable name of the configuration; override in sub-classes.
    NAME = None
    # Hardware layout: effective batch size = IMAGES_PER_GPU * GPU_COUNT.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 2
    # Training loop sizing.
    STEPS_PER_EPOCH = 1000
    VALIDATION_STEPS = 200
    # Input image geometry.
    IMAGE_MIN_DIM = 400
    IMAGE_MAX_DIM = (4096 * 2)
    IMAGE_PADDING = False
    PATCH_SIZE = 128
    MARGINAL_PIXEL = 32
    # Single-channel mean -- presumably grayscale imagery; verify upstream.
    MEAN_PIXEL = np.array([117.3])
    # Optimizer settings.
    LEARNING_RATE = 0.0001
    LEARNING_MOMENTUM = 0.9
    WEIGHT_DECAY = 0.001

    def __init__(self):
        """Set values of computed attributes."""
        self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
        self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 3])

    def display(self):
        """Display Configuration values."""
        print('\nConfigurations:')
        settings = [a for a in dir(self)
                    if not a.startswith('__') and not callable(getattr(self, a))]
        for a in settings:
            print('{:30} {}'.format(a, getattr(self, a)))
        print('\n')
class ParallelModel(KM.Model):
    """Subclasses the standard Keras Model and adds multi-GPU support.
    It works by creating a copy of the model on each GPU. Then it slices
    the inputs and sends a slice to each copy of the model, and then
    merges the outputs together and applies the loss on the combined
    outputs.
    """

    def __init__(self, keras_model, gpu_count):
        """Class constructor.
        keras_model: The Keras model to parallelize
        gpu_count: Number of GPUs. Must be > 1
        """
        # First super().__init__ makes `self` a valid Model so attribute
        # assignment works; the second re-initializes it with the real
        # parallelized graph built by make_parallel().
        super(ParallelModel, self).__init__()
        self.inner_model = keras_model
        self.gpu_count = gpu_count
        merged_outputs = self.make_parallel()
        super(ParallelModel, self).__init__(inputs=self.inner_model.inputs, outputs=merged_outputs)

    def __getattribute__(self, attrname):
        """Redirect loading and saving methods to the inner model. That's where
        the weights are stored."""
        # Any attribute containing 'load' or 'save' (load_weights,
        # save_weights, ...) is served by the wrapped model instead.
        if (('load' in attrname) or ('save' in attrname)):
            return getattr(self.inner_model, attrname)
        return super(ParallelModel, self).__getattribute__(attrname)

    def summary(self, *args, **kwargs):
        """Override summary() to display summaries of both, the wrapper
        and inner models."""
        super(ParallelModel, self).summary(*args, **kwargs)
        self.inner_model.summary(*args, **kwargs)

    def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.

        Returns the list of merged (concatenated along axis 0) outputs.
        """
        # Pre-split every input tensor into gpu_count slices.
        input_slices = {name: tf.split(x, self.gpu_count) for (name, x) in zip(self.inner_model.input_names, self.inner_model.inputs)}
        output_names = self.inner_model.output_names
        # One accumulator list per model output.
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])
        # Build one tower per GPU, each consuming its input slice.
        for i in range(self.gpu_count):
            with tf.device(('/gpu:%d' % i)):
                with tf.name_scope(('tower_%d' % i)):
                    zipped_inputs = zip(self.inner_model.input_names, self.inner_model.inputs)
                    # NOTE(review): the lambdas close over `name` and `i`
                    # (late binding); they are invoked while building the
                    # graph here, but re-calling the Lambda layers later
                    # would see the last loop values -- confirm this model
                    # is only built once.
                    inputs = [KL.Lambda((lambda s: input_slices[name][i]), output_shape=(lambda s: ((None,) + s[1:])))(tensor) for (name, tensor) in zipped_inputs]
                    outputs = self.inner_model(inputs)
                    if (not isinstance(outputs, list)):
                        outputs = [outputs]
                    # Group tower outputs by output index.
                    for (l, o) in enumerate(outputs):
                        outputs_all[l].append(o)
        # Merge the per-tower outputs on the CPU.
        with tf.device('/cpu:0'):
            merged = []
            for (outputs, name) in zip(outputs_all, output_names):
                def add_dim(tensor):
                    """Add a dimension to tensors that don't have any."""
                    if (K.int_shape(tensor) == ()):
                        return KL.Lambda((lambda t: K.reshape(t, [1, 1])))(tensor)
                    return tensor
                outputs = list(map(add_dim, outputs))
                merged.append(KL.Concatenate(axis=0, name=name)(outputs))
            return merged
class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Recognizable configuration name.
    NAME = 'coco'
    # Hardware layout: effective batch size is GPU_COUNT * IMAGES_PER_GPU.
    GPU_COUNT = 2
    IMAGES_PER_GPU = 32
class CocoDataset(utils.Dataset):
    # Dataset backed by MS COCO annotations (via pycocotools' COCO API).

    def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None, class_map=None, return_coco=False, auto_download=False):
        """Load a subset of the COCO dataset.
        dataset_dir: The root directory of the COCO dataset.
        subset: What to load (train, val, minival, valminusminival)
        year: What dataset year to load (2014, 2017) as a string, not an integer
        class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Supports maping classes from
            different datasets to the same class ID.
        return_coco: If True, returns the COCO object.
        auto_download: Automatically download and unzip MS-COCO images and annotations
        """
        if (auto_download is True):
            self.auto_download(dataset_dir, subset, year)
        coco = COCO('{}/annotations/instances_{}{}.json'.format(dataset_dir, subset, year))
        # minival / valminusminival use the val-year image directory.
        if ((subset == 'minival') or (subset == 'valminusminival')):
            subset = 'val'
        image_dir = '{}/{}{}'.format(dataset_dir, subset, year)
        # Default: load all classes.
        if (not class_ids):
            class_ids = sorted(coco.getCatIds())
        if class_ids:
            # Collect image ids per class; set() de-duplicates images that
            # contain several of the requested classes.
            image_ids = []
            # NOTE(review): loop variable `id` shadows the builtin.
            for id in class_ids:
                image_ids.extend(list(coco.getImgIds(catIds=[id])))
            image_ids = list(set(image_ids))
        else:
            image_ids = list(coco.imgs.keys())
        # Register classes and images with the base Dataset bookkeeping.
        for i in class_ids:
            self.add_class('coco', i, coco.loadCats(i)[0]['name'])
        for i in image_ids:
            self.add_image('coco', image_id=i, path=os.path.join(image_dir, coco.imgs[i]['file_name']), width=coco.imgs[i]['width'], height=coco.imgs[i]['height'], annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None)))
        if return_coco:
            return coco
class Dataset(object):
    """The base class for dataset classes.
    To use it, create a new class that adds functions specific to the dataset
    you want to use. For example:

    class CatsAndDogsDataset(Dataset):
        def load_cats_and_dogs(self):
            ...
        def load_mask(self, image_id):
            ...
        def image_reference(self, image_id):
            ...

    See COCODataset and ShapesDataset as examples.
    """

    def __init__(self, class_map=None):
        # class id 0 is always reserved for the background class 'BG'.
        self._image_ids = []
        self.image_info = []
        self.class_info = [{'source': '', 'id': 0, 'name': 'BG'}]
        self.source_class_ids = {}

    def add_class(self, source, class_id, class_name):
        """Register a class; silently ignores duplicates of (source, id)."""
        assert ('.' not in source), 'Source name cannot contain a dot'
        for info in self.class_info:
            if ((info['source'] == source) and (info['id'] == class_id)):
                # Already registered: do not add it again.
                return
        self.class_info.append({'source': source, 'id': class_id, 'name': class_name})

    def add_image(self, source, image_id, path, **kwargs):
        """Register an image; extra keyword args are stored verbatim."""
        image_info = {'id': image_id, 'source': source, 'path': path}
        image_info.update(kwargs)
        self.image_info.append(image_info)

    def image_reference(self, image_id):
        """Return a link to the image in its source Website or details about
        the image that help looking it up or debugging it.

        Override for your dataset, but pass to this function
        if you encounter images not in your dataset.
        """
        return ''

    def prepare(self, class_map=None):
        """Prepares the Dataset class for use.

        TODO: class map is not supported yet. When done, it should handle mapping
        classes from different datasets to the same class ID.
        """
        def clean_name(name):
            """Returns a shorter version of object names for cleaner display."""
            return ','.join(name.split(',')[:1])
        # Assign sequential internal ids to classes and images.
        self.num_classes = len(self.class_info)
        self.class_ids = np.arange(self.num_classes)
        self.class_names = [clean_name(c['name']) for c in self.class_info]
        self.num_images = len(self.image_info)
        self._image_ids = np.arange(self.num_images)
        # Map "source.source_id" -> internal class id.
        self.class_from_source_map = {'{}.{}'.format(info['source'], info['id']): id for (info, id) in zip(self.class_info, self.class_ids)}
        self.sources = list(set([i['source'] for i in self.class_info]))
        self.source_class_ids = {}
        # For each source, list the internal ids it contributes
        # (BG, index 0, is included for every source).
        for source in self.sources:
            self.source_class_ids[source] = []
            for (i, info) in enumerate(self.class_info):
                if ((i == 0) or (source == info['source'])):
                    self.source_class_ids[source].append(i)

    def map_source_class_id(self, source_class_id):
        """Takes a source class ID and returns the int class ID assigned to it.

        For example:
        dataset.map_source_class_id("coco.12") -> 23
        """
        return self.class_from_source_map[source_class_id]

    def get_source_class_id(self, class_id, source):
        """Map an internal class ID to the corresponding class ID in the source dataset."""
        info = self.class_info[class_id]
        assert (info['source'] == source)
        return info['id']

    def append_data(self, class_info, image_info):
        # Build reverse lookups from external (dataset, id) pairs to
        # internal indices. NOTE(review): this reads self.class_info /
        # self.image_info, not the arguments, and expects 'map' / 'ds'
        # keys that add_class/add_image never set -- confirm callers
        # populate those before using this.
        self.external_to_class_id = {}
        for (i, c) in enumerate(self.class_info):
            for (ds, id) in c['map']:
                self.external_to_class_id[(ds + str(id))] = i
        self.external_to_image_id = {}
        for (i, info) in enumerate(self.image_info):
            self.external_to_image_id[(info['ds'] + str(info['id']))] = i

    @property
    def image_ids(self):
        # Internal image ids assigned by prepare().
        return self._image_ids

    def source_image_link(self, image_id):
        """Returns the path or URL to the image.
        Override this to return a URL to the image if it's availble online for easy
        debugging.
        """
        return self.image_info[image_id]['path']

    def load_image(self, image_id):
        """Load the specified image and return a [H,W,3] Numpy array.

        NOTE(review): 3-channel images are converted to grayscale here, so
        the actual return is 2-D for RGB inputs -- the docstring above
        appears stale; confirm the intended channel layout.
        """
        image = skimage.io.imread(self.image_info[image_id]['path'])
        if (image.ndim == 3):
            image = skimage.color.rgb2gray(image)
        return image

    def load_mask(self, image_id):
        """Load instance masks for the given image.

        Different datasets use different ways to store masks. Override this
        method to load instance masks and return them in the form of am
        array of binary masks of shape [height, width, instances].

        Returns:
            masks: A bool array of shape [height, width, instance count] with
                a binary mask per instance.
            class_ids: a 1D array of class IDs of the instance masks.
        """
        # Base implementation returns empty placeholders; override it.
        mask = np.empty([0, 0, 0])
        class_ids = np.empty([0], np.int32)
        return (mask, class_ids)
def resize_image(image, min_dim=None, max_dim=None, padding=False):
    """Resize *image* to a fixed 240x320 (rows x cols) raster.

    NOTE(review): despite the parameter list and the original docstring,
    the previous implementation ignored min_dim/max_dim/padding and always
    returned a 240x320 image; that behavior is preserved. The call used
    scipy.misc.imresize, which was removed in SciPy 1.3, so the resize now
    goes through PIL with the same bilinear interpolation and uint8 output.

    :param image: 2-D (grayscale) or 3-D (H, W, C) array-like image;
        assumed to already hold 0-255 values -- TODO confirm inputs are
        uint8 (imresize rescaled other dtypes).
    :param min_dim: unused, kept for interface compatibility
    :param max_dim: unused, kept for interface compatibility
    :param padding: unused, kept for interface compatibility
    :return: the resized image as a uint8 numpy array
    """
    from PIL import Image
    arr = np.asarray(image).astype(np.uint8)
    # PIL sizes are (width, height): target is 240 rows x 320 columns.
    resized = Image.fromarray(arr).resize((320, 240), resample=Image.BILINEAR)
    return np.asarray(resized)
def parse_args(args=None):
    """Build and parse the command line for training/testing KGE models.

    :param args: optional list of argument strings (defaults to sys.argv)
    :return: the populated argparse.Namespace
    """
    parser = argparse.ArgumentParser(description='Training and Testing Knowledge Graph Embedding Models', usage='train.py [<args>] [-h | --help]')
    # Runtime / mode flags.
    parser.add_argument('--cuda', action='store_true', help='use GPU')
    parser.add_argument('--seed', default=10, type=int)
    parser.add_argument('--do_train', action='store_true')
    parser.add_argument('--do_valid', action='store_true')
    parser.add_argument('--do_test', action='store_true')
    parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
    # Dataset selection.
    parser.add_argument('--countries', action='store_true', help='Use Countries S1/S2/S3 datasets')
    parser.add_argument('--regions', type=int, nargs='+', default=None, help='Region Id for Countries S1/S2/S3 datasets, DO NOT MANUALLY SET')
    parser.add_argument('--data_path', type=str, default=None)
    # Model hyper-parameters.
    parser.add_argument('--model', default='HousE', type=str)
    parser.add_argument('-de', '--double_entity_embedding', action='store_true')
    parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
    parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
    parser.add_argument('-d', '--hidden_dim', default=500, type=int)
    parser.add_argument('-hd', '--house_dim', default=2, type=int)
    parser.add_argument('-hn', '--house_num', default=2, type=int)
    parser.add_argument('-dn', '--housd_num', default=1, type=int)
    parser.add_argument('-th', '--thred', default=0.5, type=float)
    parser.add_argument('-g', '--gamma', default=12.0, type=float)
    parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
    parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
    # Optimization / regularization.
    parser.add_argument('-b', '--batch_size', default=1024, type=int)
    parser.add_argument('-r', '--regularization', default=0.0, type=float)
    parser.add_argument('-e_reg', '--ent_reg', default=0.0, type=float)
    parser.add_argument('-r_reg', '--rel_reg', default=0.0, type=float)
    parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
    parser.add_argument('--uni_weight', action='store_true', help='Otherwise use subsampling weighting like in word2vec')
    parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
    parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
    # Checkpointing and logging cadence.
    parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
    parser.add_argument('-save', '--save_path', default=None, type=str)
    parser.add_argument('--max_steps', default=100000, type=int)
    parser.add_argument('--warm_up_steps', default=None, type=int)
    parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
    parser.add_argument('--valid_steps', default=10000, type=int)
    parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
    parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')
    # Derived sizes, filled in by the training script.
    parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
    parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
    return parser.parse_args(args)
def override_config(args):
    """
    Override model and data configuration
    with the values stored in the checkpoint's config.json.
    An explicitly supplied --data_path is kept.
    """
    config_path = os.path.join(args.init_checkpoint, 'config.json')
    with open(config_path, 'r') as fjson:
        saved = json.load(fjson)
    args.countries = saved['countries']
    if args.data_path is None:
        args.data_path = saved['data_path']
    args.model = saved['model']
    args.double_entity_embedding = saved['double_entity_embedding']
    args.double_relation_embedding = saved['double_relation_embedding']
    args.hidden_dim = saved['hidden_dim']
    args.test_batch_size = saved['test_batch_size']
def save_model(model, optimizer, save_variable_list, args, best_valid=False):
    """
    Save the parameters of the model and the optimizer,
    as well as some other variables such as step and learning_rate.
    When best_valid is True everything goes into <save_path>/best_model.
    """
    save_path = (args.save_path + '/best_model') if best_valid else args.save_path
    # Record the full argparse configuration next to the weights.
    with open(os.path.join(save_path, 'config.json'), 'w') as fjson:
        json.dump(vars(args), fjson)
    state = {**save_variable_list,
             'model_state_dict': model.state_dict(),
             'optimizer_state_dict': optimizer.state_dict()}
    torch.save(state, os.path.join(save_path, 'checkpoint'))
    # Also dump each learned tensor as a standalone .npy for analysis.
    named_tensors = (
        ('entity_embedding', model.entity_embedding),
        ('relation_embedding', model.relation_embedding),
        ('relation_weight', model.relation_weight),
        ('relation_k_head', model.k_head),
        ('relation_k_tail', model.k_tail),
    )
    for (fname, tensor) in named_tensors:
        np.save(os.path.join(save_path, fname), tensor.detach().cpu().numpy())
def read_triple(file_path, entity2id, relation2id):
    """
    Read triples and map them into ids.

    :param file_path: text file with one tab-separated (h, r, t) per line
    :param entity2id: mapping from entity name to integer id
    :param relation2id: mapping from relation name to integer id
    :return: list of (head_id, relation_id, tail_id) tuples
    """
    with open(file_path) as fin:
        rows = [line.strip().split('\t') for line in fin]
    return [(entity2id[h], relation2id[r], entity2id[t]) for (h, r, t) in rows]
def set_logger(args):
    """
    Write logs to checkpoint and console.

    The log file is train.log when training, test.log otherwise, placed in
    save_path (or init_checkpoint when save_path is unset).
    """
    base_dir = args.save_path or args.init_checkpoint
    log_name = 'train.log' if args.do_train else 'test.log'
    log_file = os.path.join(base_dir, log_name)
    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S', filename=log_file, filemode='w')
    # Mirror every record to the console as well.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s'))
    logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics):
    """
    Print the evaluation logs: one INFO line per metric in *metrics*.
    """
    for (name, value) in metrics.items():
        logging.info(('%s %s at step %d: %f' % (mode, name, step, value)))
def objective():
    """Full train/validate/test driver for the KGE model.

    Reads configuration from the module-level `args` namespace, trains the
    model (with warm-up LR decay and early stopping on validation MRR),
    and returns the final MRR of the last evaluation as the objective
    value.
    """
    # --- Reproducibility ---
    if (args.seed != 0):
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        if args.cuda:
            torch.cuda.manual_seed(args.seed)
    # Derive house_num from house_dim (even dim -> same, odd -> dim - 1).
    if ((args.house_dim % 2) == 0):
        args.house_num = args.house_dim
    else:
        args.house_num = (args.house_dim - 1)
    # --- Argument sanity checks ---
    if ((not args.do_train) and (not args.do_valid) and (not args.do_test)):
        raise ValueError('one of train/val/test mode must be choosed.')
    if args.init_checkpoint:
        override_config(args)
    elif (args.data_path is None):
        raise ValueError('one of init_checkpoint/data_path must be choosed.')
    if (args.do_train and (args.save_path is None)):
        raise ValueError('Where do you want to save your trained model?')
    # Disabled directory-creation code kept as an inert string literal.
    'if args.save_path and not os.path.exists(args.save_path):\n        os.makedirs(args.save_path)'
    # --- Load entity / relation dictionaries (tab-separated id<TAB>name) ---
    with open(os.path.join(args.data_path, 'entities.dict')) as fin:
        entity2id = dict()
        for line in fin:
            (eid, entity) = line.strip().split('\t')
            entity2id[entity] = int(eid)
    with open(os.path.join(args.data_path, 'relations.dict')) as fin:
        relation2id = dict()
        for line in fin:
            (rid, relation) = line.strip().split('\t')
            relation2id[relation] = int(rid)
    # Countries S1/S2/S3 datasets carry an extra region list.
    if args.countries:
        regions = list()
        with open(os.path.join(args.data_path, 'regions.list')) as fin:
            for line in fin:
                region = line.strip()
                regions.append(entity2id[region])
        args.regions = regions
    nentity = len(entity2id)
    nrelation = len(relation2id)
    args.nentity = nentity
    args.nrelation = nrelation
    logging.info(('Parameters: %s' % args))
    logging.info(('Model: %s' % args.model))
    logging.info(('Data Path: %s' % args.data_path))
    logging.info(('#entity: %d' % nentity))
    logging.info(('#relation: %d' % nrelation))
    # --- Load triples; the union is used for filtered ranking ---
    train_triples = read_triple(os.path.join(args.data_path, 'train.txt'), entity2id, relation2id)
    logging.info(('#train: %d' % len(train_triples)))
    valid_triples = read_triple(os.path.join(args.data_path, 'valid.txt'), entity2id, relation2id)
    logging.info(('#valid: %d' % len(valid_triples)))
    test_triples = read_triple(os.path.join(args.data_path, 'test.txt'), entity2id, relation2id)
    logging.info(('#test: %d' % len(test_triples)))
    all_true_triples = ((train_triples + valid_triples) + test_triples)
    kge_model = KGEModel(model_name=args.model, nentity=nentity, nrelation=nrelation, hidden_dim=args.hidden_dim, gamma=args.gamma, house_dim=args.house_dim, house_num=args.house_num, housd_num=args.housd_num, thred=args.thred, double_entity_embedding=args.double_entity_embedding, double_relation_embedding=args.double_relation_embedding)
    logging.info('Model Parameter Configuration:')
    for (name, param) in kge_model.named_parameters():
        logging.info(('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad))))
    if args.cuda:
        kge_model = kge_model.cuda()
    if args.do_train:
        # Alternating head-batch / tail-batch negative sampling.
        train_dataloader_head = DataLoader(TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'head-batch'), batch_size=args.batch_size, shuffle=True, num_workers=max(1, (args.cpu_num // 2)), collate_fn=TrainDataset.collate_fn)
        train_dataloader_tail = DataLoader(TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'tail-batch'), batch_size=args.batch_size, shuffle=True, num_workers=max(1, (args.cpu_num // 2)), collate_fn=TrainDataset.collate_fn)
        train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
        current_learning_rate = args.learning_rate
        optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), kge_model.parameters()), lr=current_learning_rate)
        if args.warm_up_steps:
            warm_up_steps = args.warm_up_steps
        else:
            warm_up_steps = (args.max_steps // 2)
    # --- Optional checkpoint restore ---
    if args.init_checkpoint:
        logging.info(('Loading checkpoint %s...' % args.init_checkpoint))
        checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
        init_step = checkpoint['step']
        kge_model.load_state_dict(checkpoint['model_state_dict'])
        if args.do_train:
            current_learning_rate = checkpoint['current_learning_rate']
            warm_up_steps = checkpoint['warm_up_steps']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    else:
        logging.info(('Ramdomly Initializing %s Model...' % args.model))
        init_step = 0
    step = init_step
    logging.info('Start Training...')
    logging.info(('init_step = %d' % init_step))
    logging.info(('batch_size = %d' % args.batch_size))
    logging.info(('negative_adversarial_sampling = %d' % args.negative_adversarial_sampling))
    logging.info(('hidden_dim = %d' % args.hidden_dim))
    logging.info(('gamma = %f' % args.gamma))
    logging.info(('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling)))
    if args.negative_adversarial_sampling:
        logging.info(('adversarial_temperature = %f' % args.adversarial_temperature))
    if args.do_train:
        # NOTE(review): %d truncates the (float) learning rate in this log line.
        logging.info(('learning_rate = %d' % current_learning_rate))
        training_logs = []
        best_mrr = 0
        kill_cnt = 0  # validation rounds without an MRR improvement
        for step in range(init_step, (args.max_steps + 1)):
            log = kge_model.train_step(kge_model, optimizer, train_iterator, args)
            training_logs.append(log)
            # Decay LR by 10x at warm_up_steps, then double the horizon.
            if (step >= warm_up_steps):
                current_learning_rate = (current_learning_rate / 10)
                logging.info(('Change learning_rate to %f at step %d' % (current_learning_rate, step)))
                optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), kge_model.parameters()), lr=current_learning_rate)
                warm_up_steps = (warm_up_steps * 2)
            if ((step % args.save_checkpoint_steps) == 0):
                save_variable_list = {'step': step, 'current_learning_rate': current_learning_rate, 'warm_up_steps': warm_up_steps}
                save_model(kge_model, optimizer, save_variable_list, args)
            if ((step % args.log_steps) == 0):
                # Average the accumulated per-step training metrics.
                metrics = {}
                for metric in training_logs[0].keys():
                    metrics[metric] = (sum([log[metric] for log in training_logs]) / len(training_logs))
                log_metrics('Training average', step, metrics)
                training_logs = []
            if (args.do_valid and ((step % args.valid_steps) == 0)):
                kill_cnt += 1
                logging.info('Evaluating on Valid Dataset...')
                metrics = kge_model.test_step(kge_model, valid_triples, all_true_triples, args)
                log_metrics('Valid', step, metrics)
                # Keep the best-MRR snapshot and reset the early-stop counter.
                if (metrics['MRR'] > best_mrr):
                    save_variable_list = {'step': step, 'current_learning_rate': current_learning_rate, 'warm_up_steps': warm_up_steps}
                    save_model(kge_model, optimizer, save_variable_list, args, True)
                    best_mrr = metrics['MRR']
                    kill_cnt = 0
                # NOTE(review): test evaluation runs only on validation
                # steps here -- confirm that cadence is intended.
                if args.do_test:
                    logging.info('Evaluating on Test Dataset...')
                    metrics = kge_model.test_step(kge_model, test_triples, all_true_triples, args)
                    log_metrics('Test', step, metrics)
                # Early stop after 5 validations without improvement.
                if (kill_cnt >= 5):
                    logging.info(('Early Stop at step %d' % step))
                    break
        save_variable_list = {'step': step, 'current_learning_rate': current_learning_rate, 'warm_up_steps': warm_up_steps}
        save_model(kge_model, optimizer, save_variable_list, args)
    if args.evaluate_train:
        logging.info('Evaluating on Training Dataset...')
        metrics = kge_model.test_step(kge_model, train_triples, all_true_triples, args)
        log_metrics('Test', step, metrics)
    # --- Final evaluation with the best checkpoint ---
    if args.do_valid:
        logging.info('Evaluating on Valid Dataset with Best Model...')
        checkpoint = torch.load(os.path.join((args.save_path + '/best_model'), 'checkpoint'))
        best_step = checkpoint['step']
        kge_model.load_state_dict(checkpoint['model_state_dict'])
        metrics = kge_model.test_step(kge_model, valid_triples, all_true_triples, args)
        log_metrics('Valid', best_step, metrics)
    if args.do_test:
        logging.info('Evaluating on Test Dataset with Best Model...')
        checkpoint = torch.load(os.path.join((args.save_path + '/best_model'), 'checkpoint'))
        best_step = checkpoint['step']
        kge_model.load_state_dict(checkpoint['model_state_dict'])
        metrics = kge_model.test_step(kge_model, test_triples, all_true_triples, args)
        log_metrics('Test', best_step, metrics)
    # NOTE(review): `metrics` here is whatever evaluation ran last; this
    # raises NameError if no evaluation branch executed.
    final_result = metrics['MRR']
    return final_result
def t_start(j, Js=[(1, 2), (3, 4), (5, 6)], Trange=(1, 10)):
    """
    Helper for `E_gt_func`

    :param j: index from 0 to len(Js) (included) on which to get the start
    :param Js: ground truth events, as a list of couples
    :param Trange: range of the series where Js is included
    :return: generalized start such that the middle of t_start and t_stop
        always gives the affiliation zone
    """
    n = len(Js)
    if (j == n):
        # Virtual event past the end: mirror the stop of the last event
        # around the upper bound of the range.
        b = max(Trange)
        return (2 * b) - t_stop(n - 1, Js, Trange)
    return Js[j][0]
def t_stop(j, Js=[(1, 2), (3, 4), (5, 6)], Trange=(1, 10)):
    """
    Helper for `E_gt_func`

    :param j: index from 0 to len(Js) (included) on which to get the stop
    :param Js: ground truth events, as a list of couples
    :param Trange: range of the series where Js is included
    :return: generalized stop such that the middle of t_start and t_stop
        always gives the affiliation zone
    """
    if (j == (- 1)):
        # Virtual event before the start: mirror the start of the first
        # event around the lower bound of the range.
        a = min(Trange)
        return (2 * a) - t_start(0, Js, Trange)
    return Js[j][1]
def E_gt_func(j, Js, Trange):
    """
    Get the affiliation zone of element j of the ground truth

    :param j: index from 0 to len(Js) (excluded) on which to get the zone
    :param Js: ground truth events, as a list of couples
    :param Trange: range of the series where Js is included, can
        be (-math.inf, math.inf) for distance measures
    :return: affiliation zone of element j of the ground truth represented
        as a couple
    """
    # Each boundary is the midpoint between a stop and the next start;
    # t_start/t_stop handle the virtual events at the range edges.
    left = (t_stop(j - 1, Js, Trange) + t_start(j, Js, Trange)) / 2
    right = (t_stop(j, Js, Trange) + t_start(j + 1, Js, Trange)) / 2
    return (left, right)
def get_all_E_gt_func(Js, Trange):
    """
    Get the affiliation partition from the ground truth point of view

    :param Js: ground truth events, as a list of couples
    :param Trange: range of the series where Js is included, can
        be (-math.inf, math.inf) for distance measures
    :return: affiliation partition of the events
    """
    return [E_gt_func(j, Js, Trange) for j in range(len(Js))]
def affiliation_partition(Is=[(1, 1.5), (2, 5), (5, 6), (8, 9)], E_gt=[(1, 2.5), (2.5, 4.5), (4.5, 10)]):
    """
    Cut the events into the affiliation zones
    The presentation given here is from the ground truth point of view,
    but it is also used in the reversed direction in the main function.

    :param Is: events as a list of couples
    :param E_gt: range of the affiliation zones
    :return: a list of list of intervals (each interval represented by either
        a couple or None for empty interval). The outer list is indexed by each
        affiliation zone of `E_gt`. The inner list is indexed by the events of `Is`.
    """
    # Cleanup: the original computed discarded/kept index masks per zone
    # but never applied them (the comprehension carried no filter, so the
    # "kept" list was always the full Is). Behavior is unchanged: every
    # event is intersected with every zone, and events outside a zone
    # simply yield None via interval_intersection.
    return [[interval_intersection(I, zone) for I in Is] for zone in E_gt]
def interval_length(J=(1, 2)):
    """Length of an interval (0 for the empty interval None).

    :param J: couple representing the start and stop of an interval, or None
    :return: stop - start, or 0 when J is None
    """
    return 0 if J is None else J[1] - J[0]
def sum_interval_lengths(Is=[(1, 2), (3, 4), (5, 6)]):
    """Total length of a list of intervals.

    :param Is: list of intervals represented by starts and stops
    :return: sum of the individual interval lengths
    """
    return sum(interval_length(I) for I in Is)
def interval_intersection(I=(1, 3), J=(2, 4)):
    """Intersection between two intervals I and J.

    I and J should be either empty (None) or represent a positive interval.

    :param I: an interval represented by start and stop
    :param J: a second interval of the same form
    :return: the intersection as a couple, or None if it is empty
    """
    if I is None or J is None:
        return None
    start = max(I[0], J[0])
    stop = min(I[1], J[1])
    # A degenerate or inverted result means the intervals do not overlap.
    return (start, stop) if start < stop else None
def interval_subset(I=(1, 3), J=(0, 6)):
    """Check whether I is a subset of J.

    :param I: a non empty interval represented by start and stop
    :param J: a second non empty interval of the same form
    :return: True if I is a subset of J
    """
    # Return the comparison directly instead of the redundant
    # `if ...: return True / else: return False` form.
    return (I[0] >= J[0]) and (I[1] <= J[1])
def cut_into_three_func(I, J):
    """Partition interval I relatively to a non empty interval J.

    Splits I into the elements before J, the elements belonging to J,
    and the elements after J.

    :param I: an interval (start, stop), or None for an empty one
    :param J: a non empty interval
    :return: a triplet of intervals, each either (start, stop) or None
    """
    if I is None:
        return (None, None, None)
    I_inter_J = interval_intersection(I, J)
    if I == I_inter_J:
        # I is fully contained in J.
        return (None, I_inter_J, None)
    if I[1] <= J[0]:
        # I lies entirely before J.
        return (I, I_inter_J, None)
    if I[0] >= J[1]:
        # I lies entirely after J.
        return (None, I_inter_J, I)
    if I[0] <= J[0] and I[1] >= J[1]:
        # I overhangs J on both sides.
        return ((I[0], I_inter_J[0]), I_inter_J, (I_inter_J[1], I[1]))
    if I[0] <= J[0]:
        # I overhangs J only on the left.
        return ((I[0], I_inter_J[0]), I_inter_J, None)
    if I[1] >= J[1]:
        # I overhangs J only on the right.
        return (None, I_inter_J, (I_inter_J[1], I[1]))
    raise ValueError('unexpected unconsidered case')
def get_pivot_j(I, J):
    """Single point of J that is the closest to I ('pivot').

    Requires I to be entirely outside J.

    :param I: a non empty interval (start, stop)
    :param J: another non empty interval, with empty intersection with I
    :return: the element j of J that is the closest to I
    :raises ValueError: if I and J intersect, or I is not outside J
    """
    if interval_intersection(I, J) is not None:
        raise ValueError('I and J should have a void intersection')
    if max(I) <= min(J):
        # I lies to the left of J: closest point is J's left border.
        return min(J)
    if min(I) >= max(J):
        # I lies to the right of J: closest point is J's right border.
        return max(J)
    raise ValueError('I should be outside J')
def integral_mini_interval(I, J):
    """Integral of d(x, J) over x in I, with I located outside J.

    This is the *integral* i.e. the sum (not divided by the length of I).

    :param I: an interval (start, stop), or None
    :param J: a non empty interval, with empty intersection with I
    :return: the integral of distances d(x, J) over x in I
    """
    if I is None:
        return 0
    j_pivot = get_pivot_j(I, J)
    start, stop = min(I), max(I)
    # The distance profile is linear over I, so the integral equals the
    # length of I times the distance from its midpoint to the pivot.
    return (stop - start) * abs(j_pivot - (start + stop) / 2)
def integral_interval_distance(I, J):
    """Integral of d(x, J) over x in I (sum, not mean); I may intersect J.

    :param I: an interval (start, stop), or None
    :param J: a non empty interval
    :return: the integral of distances d(x, J) over x in I
    """
    # Inside J the distance is zero, so only the outer pieces contribute.
    I_before, _I_middle, I_after = cut_into_three_func(I, J)
    return integral_mini_interval(I_before, J) + integral_mini_interval(I_after, J)
def integral_mini_interval_P_CDFmethod__min_piece(I, J, E):
    """Helper of `integral_mini_interval_Pprecision_CDFmethod`.

    In the specific case where interval I is located outside J,
    compute the integral $\\int_{d_min}^{d_max} \\min(m, x) dx$, with:
    - m the smallest distance from J to the border of E,
    - d_min the smallest distance d(x, J) from x in I to J,
    - d_max the largest distance d(x, J) from x in I to J.

    :param I: a single predicted interval, a non empty interval (start, stop)
    :param J: ground truth interval, a non empty interval, with empty intersection with I
    :param E: the affiliation/influence zone for J, represented as a couple (start, stop)
    :return: the integral $\\int_{d_min}^{d_max} \\min(m, x) dx$
    :raises ValueError: if I intersects J, or I or J is not included in E
    """
    if (interval_intersection(I, J) is not None):
        raise ValueError('I and J should have a void intersection')
    if (not interval_subset(J, E)):
        raise ValueError('J should be included in E')
    if (not interval_subset(I, E)):
        raise ValueError('I should be included in E')
    # Borders of the nested intervals I, J inside E.
    e_min = min(E)
    j_min = min(J)
    j_max = max(J)
    e_max = max(E)
    i_min = min(I)
    i_max = max(I)
    # Smallest / largest distance from a point of I to J; since I is
    # outside J, exactly one operand of each max() is positive.
    d_min = max((i_min - j_max), (j_min - i_max))
    d_max = max((i_max - j_max), (j_min - i_min))
    # m: distance from J to the closest border of its affiliation zone E.
    m = min((j_min - e_min), (e_max - j_max))
    # Split the integral of min(m, x) over [d_min, d_max] into a quadratic
    # part (where x <= m) and a constant part (where x > m, min is m).
    A = ((min(d_max, m) ** 2) - (min(d_min, m) ** 2))
    B = (max(d_max, m) - max(d_min, m))
    C = (((1 / 2) * A) + (m * B))
    return C
def integral_mini_interval_Pprecision_CDFmethod(I, J, E):
    """Integral of the probability of distances over the interval I.

    In the specific case where interval I is located outside J,
    compute the integral $\\int_{x \\in I} Fbar(dist(x,J)) dx$.
    This is the *integral* i.e. the sum (not the mean).

    :param I: a single predicted interval, a non empty interval (start, stop)
    :param J: ground truth interval, a non empty interval, with empty intersection with I
    :param E: the affiliation/influence zone for J, represented as a couple (start, stop)
    :return: the integral $\\int_{x \\in I} Fbar(dist(x,J)) dx$
    """
    # Integral of min(m, d) for d ranging over the distances from I to J.
    integral_min_piece = integral_mini_interval_P_CDFmethod__min_piece(I, J, E)
    e_min = min(E)
    j_min = min(J)
    j_max = max(J)
    e_max = max(E)
    i_min = min(I)
    i_max = max(I)
    # Smallest / largest distance from a point of I to J (I outside J).
    d_min = max((i_min - j_max), (j_min - i_max))
    d_max = max((i_max - j_max), (j_min - i_min))
    # Integral of the linear distance profile d over I.
    integral_linear_piece = ((1 / 2) * ((d_max ** 2) - (d_min ** 2)))
    # Constant term |J| integrated over the length of I.
    integral_remaining_piece = ((j_max - j_min) * (i_max - i_min))
    DeltaI = (i_max - i_min)
    DeltaE = (e_max - e_min)
    # Fbar = 1 - F: integrate 1 over I (= DeltaI) minus the CDF pieces
    # normalized by the size of the affiliation zone E.
    output = (DeltaI - ((1 / DeltaE) * ((integral_min_piece + integral_linear_piece) + integral_remaining_piece)))
    return output
def integral_interval_probaCDF_precision(I, J, E):
    """Integral of the probability of distances over the interval I.

    Compute the integral $\\int_{x \\in I} Fbar(dist(x,J)) dx$.
    This is the *integral* i.e. the sum (not the mean).

    :param I: a single (non empty) predicted interval in the zone of affiliation of J
    :param J: ground truth interval
    :param E: affiliation/influence zone for J
    :return: the integral $\\int_{x \\in I} Fbar(dist(x,J)) dx$
    """
    def outside(piece):
        # Piece of I outside J: CDF-based integral.
        return 0 if piece is None else integral_mini_interval_Pprecision_CDFmethod(piece, J, E)

    def inside(piece):
        # Piece of I inside J: distance is 0, Fbar is 1, integral = length.
        return 0 if piece is None else max(piece) - min(piece)

    before, middle, after = cut_into_three_func(I, J)
    return outside(before) + inside(middle) + outside(after)
def cut_J_based_on_mean_func(J, e_mean):
    """Helper function for the recall.

    Partition J into the part before and the part after e_mean
    (e_mean represents the center element of E, the zone of affiliation).

    :param J: ground truth interval, or None
    :param e_mean: a float number (center value of E)
    :return: a couple partitioning J into (J_before, J_after)
    """
    if J is None:
        return (None, None)
    if e_mean >= max(J):
        return (J, None)
    if e_mean <= min(J):
        return (None, J)
    return ((min(J), e_mean), (e_mean, max(J)))
def integral_mini_interval_Precall_CDFmethod(I, J, E):
    """Integral of the probability of distances over the interval J.

    In the specific case where interval J is located outside I,
    compute the integral $\\int_{y \\in J} Fbar_y(dist(y,I)) dy$.
    This is the *integral* i.e. the sum (not the mean).

    :param I: a single (non empty) predicted interval
    :param J: ground truth (non empty) interval, with empty intersection with I
    :param E: the affiliation/influence zone for J, represented as a couple (start, stop)
    :return: the integral $\\int_{y \\in J} Fbar_y(dist(y,I)) dy$
    """
    # Closest point of I to J (J is required to be outside I).
    i_pivot = get_pivot_j(J, I)
    e_min = min(E)
    e_max = max(E)
    e_mean = ((e_min + e_max) / 2)
    # If the pivot falls outside the affiliation zone, the contribution is 0.
    if (i_pivot <= min(E)):
        return 0
    elif (i_pivot >= max(E)):
        return 0
    # Split J at the center of E ...
    cut_J_based_on_e_mean = cut_J_based_on_mean_func(J, e_mean)
    J_before = cut_J_based_on_e_mean[0]
    J_after = cut_J_based_on_e_mean[1]
    # ... then split each half again at the midpoint between the pivot and
    # the corresponding border of E, yielding up to four sub-pieces of J
    # ("closeE" pieces are nearer to a border of E, "closeI" nearer to I).
    iemin_mean = ((e_min + i_pivot) / 2)
    cut_Jbefore_based_on_iemin_mean = cut_J_based_on_mean_func(J_before, iemin_mean)
    J_before_closeE = cut_Jbefore_based_on_iemin_mean[0]
    J_before_closeI = cut_Jbefore_based_on_iemin_mean[1]
    iemax_mean = ((e_max + i_pivot) / 2)
    cut_Jafter_based_on_iemax_mean = cut_J_based_on_mean_func(J_after, iemax_mean)
    J_after_closeI = cut_Jafter_based_on_iemax_mean[0]
    J_after_closeE = cut_Jafter_based_on_iemax_mean[1]
    # Extract the borders of each sub-piece; math.nan marks an empty piece
    # (NaN parts are later dropped by _sum_wo_nan).
    if (J_before_closeE is not None):
        j_before_before_min = min(J_before_closeE)
        j_before_before_max = max(J_before_closeE)
    else:
        j_before_before_min = math.nan
        j_before_before_max = math.nan
    if (J_before_closeI is not None):
        j_before_after_min = min(J_before_closeI)
        j_before_after_max = max(J_before_closeI)
    else:
        j_before_after_min = math.nan
        j_before_after_max = math.nan
    if (J_after_closeI is not None):
        j_after_before_min = min(J_after_closeI)
        j_after_before_max = max(J_after_closeI)
    else:
        j_after_before_min = math.nan
        j_after_before_max = math.nan
    if (J_after_closeE is not None):
        j_after_after_min = min(J_after_closeE)
        j_after_after_max = max(J_after_closeE)
    else:
        j_after_after_min = math.nan
        j_after_after_max = math.nan
    # Closed-form integral of min(m(y), d(y, i_pivot)) on each sub-piece.
    # The two branches correspond to J entirely before or entirely after I.
    if (i_pivot >= max(J)):
        part1_before_closeE = ((i_pivot - e_min) * (j_before_before_max - j_before_before_min))
        part2_before_closeI = (((2 * i_pivot) * (j_before_after_max - j_before_after_min)) - ((j_before_after_max ** 2) - (j_before_after_min ** 2)))
        part3_after_closeI = (((2 * i_pivot) * (j_after_before_max - j_after_before_min)) - ((j_after_before_max ** 2) - (j_after_before_min ** 2)))
        part4_after_closeE = (((e_max + i_pivot) * (j_after_after_max - j_after_after_min)) - ((j_after_after_max ** 2) - (j_after_after_min ** 2)))
        out_parts = [part1_before_closeE, part2_before_closeI, part3_after_closeI, part4_after_closeE]
    elif (i_pivot <= min(J)):
        part1_before_closeE = (((j_before_before_max ** 2) - (j_before_before_min ** 2)) - ((e_min + i_pivot) * (j_before_before_max - j_before_before_min)))
        part2_before_closeI = (((j_before_after_max ** 2) - (j_before_after_min ** 2)) - ((2 * i_pivot) * (j_before_after_max - j_before_after_min)))
        part3_after_closeI = (((j_after_before_max ** 2) - (j_after_before_min ** 2)) - ((2 * i_pivot) * (j_after_before_max - j_after_before_min)))
        part4_after_closeE = ((e_max - i_pivot) * (j_after_after_max - j_after_after_min))
        out_parts = [part1_before_closeE, part2_before_closeI, part3_after_closeI, part4_after_closeE]
    else:
        raise ValueError('The i_pivot should be outside J')
    # Sum the non-empty (non-NaN) pieces, then normalize by the size of E.
    out_integral_min_dm_plus_d = _sum_wo_nan(out_parts)
    DeltaJ = (max(J) - min(J))
    DeltaE = (max(E) - min(E))
    C = (DeltaJ - ((1 / DeltaE) * out_integral_min_dm_plus_d))
    return C
def integral_interval_probaCDF_recall(I, J, E):
    """Integral of the probability of distances over the interval J.

    Compute the integral $\\int_{y \\in J} Fbar_y(dist(y,I)) dy$.
    This is the *integral* i.e. the sum (not the mean).

    :param I: a single (non empty) predicted interval
    :param J: ground truth (non empty) interval
    :param E: the affiliation/influence zone for J
    :return: the integral $\\int_{y \\in J} Fbar_y(dist(y,I)) dy$
    """
    def outside(piece):
        # Piece of J outside I: CDF-based integral.
        return 0 if piece is None else integral_mini_interval_Precall_CDFmethod(I, piece, E)

    def inside(piece):
        # Piece of J inside I: distance is 0, Fbar is 1, integral = length.
        return 0 if piece is None else max(piece) - min(piece)

    before, middle, after = cut_into_three_func(J, I)
    return outside(before) + inside(middle) + outside(after)
def affiliation_precision_distance(Is=[(1, 2), (3, 4), (5, 6)], J=(2, 5.5)):
    """Individual average distance from Is to a single ground truth J.

    :param Is: list of predicted events within the affiliation zone of J
    :param J: couple representing the start and stop of a ground truth interval
    :return: individual average precision directed distance, or math.nan
        when every element of Is is None (nothing affiliated to J)
    """
    if all(I is None for I in Is):
        return math.nan
    total = sum(integral_interval_distance(I, J) for I in Is)
    return total / sum_interval_lengths(Is)
def affiliation_precision_proba(Is=[(1, 2), (3, 4), (5, 6)], J=(2, 5.5), E=(0, 8)):
    """Individual precision probability from Is to a single ground truth J.

    :param Is: list of predicted events within the affiliation zone of J
    :param J: couple representing the start and stop of a ground truth interval
    :param E: couple representing the start and stop of the zone of affiliation of J
    :return: individual precision probability in [0, 1], or math.nan when
        every element of Is is None (nothing affiliated to J)
    """
    if all(I is None for I in Is):
        return math.nan
    total = sum(integral_interval_probaCDF_precision(I, J, E) for I in Is)
    return total / sum_interval_lengths(Is)
def affiliation_recall_distance(Is=[(1, 2), (3, 4), (5, 6)], J=(2, 5.5)):
    """Individual average distance from a single J to the predictions Is.

    :param Is: list of predicted events within the affiliation zone of J
    :param J: couple representing the start and stop of a ground truth interval
    :return: individual average recall directed distance, math.inf when
        no prediction is affiliated to J
    """
    predictions = [I for I in Is if I is not None]
    if not predictions:
        # Nothing predicted in the zone: J is infinitely far from Is.
        return math.inf
    # Affiliate each part of J to its closest prediction, then integrate
    # the distance from those parts of J to their prediction.
    zones = get_all_E_gt_func(predictions, (-math.inf, math.inf))
    J_pieces = affiliation_partition([J], zones)
    total = sum(integral_interval_distance(piece[0], I)
                for I, piece in zip(predictions, J_pieces))
    return total / interval_length(J)
def affiliation_recall_proba(Is=[(1, 2), (3, 4), (5, 6)], J=(2, 5.5), E=(0, 8)):
    """Individual recall probability from a single ground truth J to Is.

    :param Is: list of predicted events within the affiliation zone of J
    :param J: couple representing the start and stop of a ground truth interval
    :param E: couple representing the start and stop of the zone of affiliation of J
    :return: individual recall probability in [0, 1]
    """
    predictions = [I for I in Is if I is not None]
    if not predictions:
        # Nothing predicted in the zone: recall of J is zero.
        return 0
    # Affiliate each part of J to its closest prediction, then integrate
    # the recall probability of those parts against their prediction.
    zones = get_all_E_gt_func(predictions, E)
    J_pieces = affiliation_partition([J], zones)
    total = sum(integral_interval_probaCDF_recall(I, piece[0], E)
                for I, piece in zip(predictions, J_pieces))
    return total / interval_length(J)
def convert_vector_to_events(vector=[0, 1, 1, 0, 0, 1, 0]):
    """Convert a binary vector (1 marking anomalous instances) to events.

    Events are durations: a 1 at index i corresponds to the anomalous
    half-open interval [i, i+1).

    :param vector: a list of elements belonging to {0, 1}
    :return: a list of couples, each representing the start and stop of an event
    """
    positives = [idx for idx, val in enumerate(vector) if val > 0]
    events = []
    # Consecutive indexes share the same (rank - index) key, so every
    # groupby group is one maximal run of anomalous positions.
    for _, run in groupby(enumerate(positives), lambda ix: ix[0] - ix[1]):
        indexes = [idx for _, idx in run]
        # Stop is exclusive: last anomalous index + 1.
        events.append((indexes[0], indexes[-1] + 1))
    return events
def infer_Trange(events_pred, events_gt):
    """Smallest Trange containing both predicted and ground truth events.

    Trange will not influence the measure of distances, but will impact
    the measures of probabilities.

    :param events_pred: a list of couples corresponding to predicted events
    :param events_gt: a list of couples corresponding to ground truth events
    :return: a couple corresponding to the smallest range containing the events
    :raises ValueError: when events_gt is empty
    """
    if len(events_gt) == 0:
        raise ValueError('The gt events should contain at least one event')
    if len(events_pred) == 0:
        # No prediction: fall back on the range of the ground truth only.
        return infer_Trange(events_gt, events_gt)
    starts = [x[0] for x in events_pred] + [x[0] for x in events_gt]
    stops = [x[1] for x in events_pred] + [x[1] for x in events_gt]
    return (min(starts), max(stops))
def has_point_anomalies(events):
    """Check whether events contain point anomalies (start == stop).

    :param events: a list of couples corresponding to predicted events
    :return: True if the events have any point anomaly, False otherwise
    """
    if len(events) == 0:
        return False
    # Shortest duration of 0 means at least one zero-length event.
    return min(stop - start for start, stop in events) == 0
def _sum_wo_nan(vec): '\n Sum of elements, ignoring math.isnan ones\n \n :param vec: vector of floating numbers\n :return: sum of the elements, ignoring math.isnan ones\n ' vec_wo_nan = [e for e in vec if (not math.isnan(e))] return sum(vec_wo_nan)
def _len_wo_nan(vec): '\n Count of elements, ignoring math.isnan ones\n \n :param vec: vector of floating numbers\n :return: count of the elements, ignoring math.isnan ones\n ' vec_wo_nan = [e for e in vec if (not math.isnan(e))] return len(vec_wo_nan)
def read_gz_data(filename='data/machinetemp_groundtruth.gz'):
    """Load a gz-compressed file of one 0/1 label per line.

    Each line is either 0 (normal instance) or 1 (anomalous instance).

    :param filename: file path to the gz compressed file
    :return: list of integers with either 0 or 1
    """
    with gzip.open(filename, 'rb') as stream:
        lines = stream.read().splitlines()
    return [int(line) for line in lines]
def read_all_as_events():
    """Load the files in `data/` and convert them to events.

    The length of each series is kept. The file-name convention is
    `dataset_algorithm.gz`.

    :return: two dictionaries:
        - the first containing the list of events per dataset and algorithm,
        - the second containing the (0, length) range of each dataset
    """
    datasets = dict()
    Tranges = dict()
    for filepath in glob.glob('data/*.gz'):
        vector = read_gz_data(filepath)
        events = convert_vector_to_events(vector)
        # Split `dataset_algorithm.gz` into its two name components.
        name_parts = os.path.split(filepath)[1].split('_')
        data_name = name_parts[0]
        algo_name = name_parts[1].split('.')[0]
        if data_name not in datasets:
            datasets[data_name] = dict()
            Tranges[data_name] = (0, len(vector))
        datasets[data_name][algo_name] = events
    return (datasets, Tranges)
def f1_func(p, r):
    """Compute the F1 score (harmonic mean of precision and recall).

    :param p: precision numeric value
    :param r: recall numeric value
    :return: F1 numeric value; 0 when both p and r are 0
    """
    if p + r == 0:
        # Fix: the original raised ZeroDivisionError here; by the usual
        # F1 convention the score is 0 when precision and recall are both 0.
        return 0
    return (2 * p * r) / (p + r)
def test_events(events): '\n Verify the validity of the input events\n :param events: list of events, each represented by a couple (start, stop)\n :return: None. Raise an error for incorrect formed or non ordered events\n ' if (type(events) is not list): raise TypeError('Input `events` should be a list of couples') if (not all([(type(x) is tuple) for x in events])): raise TypeError('Input `events` should be a list of tuples') if (not all([(len(x) == 2) for x in events])): raise ValueError('Input `events` should be a list of couples (start, stop)') if (not all([(x[0] <= x[1]) for x in events])): raise ValueError('Input `events` should be a list of couples (start, stop) with start <= stop') if (not all([(events[i][1] < events[(i + 1)][0]) for i in range((len(events) - 1))])): raise ValueError('Couples of input `events` should be disjoint and ordered')
def pr_from_events(events_pred, events_gt, Trange):
    """Compute the affiliation metrics, including precision/recall in [0,1],
    along with the individual precision/recall distances and probabilities.

    :param events_pred: list of predicted events, each represented by a couple
        indicating the start and the stop of the event
    :param events_gt: list of ground truth events, each represented by a couple
        indicating the start and the stop of the event
    :param Trange: range of the series where events_pred and events_gt are
        included, represented as a couple (start, stop)
    :return: dictionary with precision, recall, and the individual metrics
    :raises TypeError, ValueError: on malformed events or inconsistent Trange
    """
    # Validate the structure and ordering of both event lists.
    test_events(events_pred)
    test_events(events_gt)
    # Trange must contain every event on both sides.
    minimal_Trange = infer_Trange(events_pred, events_gt)
    if (not (Trange[0] <= minimal_Trange[0])):
        raise ValueError('`Trange` should include all the events')
    if (not (minimal_Trange[1] <= Trange[1])):
        raise ValueError('`Trange` should include all the events')
    if (len(events_gt) == 0):
        raise ValueError('Input `events_gt` should have at least one event')
    if (has_point_anomalies(events_pred) or has_point_anomalies(events_gt)):
        raise ValueError('Cannot manage point anomalies currently')
    # NOTE(review): this None check is unreachable — `Trange[0]` above would
    # already raise TypeError if Trange were None; consider moving it first.
    if (Trange is None):
        raise ValueError('Trange should be indicated (or inferred with the `infer_Trange` function')
    # One affiliation zone per ground truth event, then the predicted
    # events are distributed into those zones.
    E_gt = get_all_E_gt_func(events_gt, Trange)
    aff_partition = affiliation_partition(events_pred, E_gt)
    # Individual directed distances, per ground truth event.
    d_precision = [affiliation_precision_distance(Is, J) for (Is, J) in zip(aff_partition, events_gt)]
    d_recall = [affiliation_recall_distance(Is, J) for (Is, J) in zip(aff_partition, events_gt)]
    # Individual probabilities, per ground truth event and its zone.
    p_precision = [affiliation_precision_proba(Is, J, E) for (Is, J, E) in zip(aff_partition, events_gt, E_gt)]
    p_recall = [affiliation_recall_proba(Is, J, E) for (Is, J, E) in zip(aff_partition, events_gt, E_gt)]
    # Average precision over the defined (non-NaN) zones; when every zone
    # is NaN (no affiliated prediction at all), propagate NaN.
    if (_len_wo_nan(p_precision) > 0):
        p_precision_average = (_sum_wo_nan(p_precision) / _len_wo_nan(p_precision))
    else:
        p_precision_average = p_precision[0]
    p_recall_average = (sum(p_recall) / len(p_recall))
    dict_out = dict({'precision': p_precision_average, 'recall': p_recall_average, 'individual_precision_probabilities': p_precision, 'individual_recall_probabilities': p_recall, 'individual_precision_distances': d_precision, 'individual_recall_distances': d_recall})
    return dict_out
def produce_all_results():
    """Affiliation precision/recall for every file in the `data` repository.

    :return: a dictionary indexed by data names, each containing a dictionary
        indexed by algorithm names, each containing the affiliation metrics
        (precision, recall, individual probabilities and distances)
    """
    datasets, Tranges = read_all_as_events()
    results = dict()
    for data_name, algos in datasets.items():
        # Evaluate every algorithm against the dataset's ground truth.
        results[data_name] = {
            algo_name: pr_from_events(events, algos['groundtruth'], Tranges[data_name])
            for algo_name, events in algos.items()
            if algo_name != 'groundtruth'
        }
    return results
class Config(object):
    """Hyper-parameter configuration for the 'IOpsCompetition' dataset.

    NOTE(review): a second `Config` class is defined later in this file
    (for the 'UCR' dataset); at module level the later definition shadows
    this one — confirm how the intended configuration is selected.
    """
    def __init__(self):
        self.dataset = 'IOpsCompetition'
        # Encoder architecture (channels, convolution, recurrent sizes).
        self.input_channels = 1
        self.kernel_size = 4
        self.stride = 1
        self.final_out_channels = 32
        self.hidden_size = 64
        self.num_layers = 3
        self.project_channels = 20
        self.dropout = 0.45
        self.features_len = 6
        # Sliding-window parameters over the time series.
        self.window_size = 16
        self.time_step = 2
        # Training schedule.
        self.num_epoch = 5
        self.freeze_length_epoch = 2
        self.change_center_epoch = 1
        # Loss weighting and center update tolerance.
        self.center_eps = 0.1
        self.omega1 = 1
        self.omega2 = 0.1
        # Adam optimizer betas and learning rate.
        self.beta1 = 0.9
        self.beta2 = 0.99
        self.lr = 0.0001
        self.drop_last = False
        self.batch_size = 512
        # One-class objective parameters (nu: train quantile,
        # detect_nu: detection quantile) — presumably Deep SVDD-style;
        # confirm against the training code.
        self.nu = 0.001
        self.detect_nu = 0.0015
        self.threshold_determine = 'floating'
        self.objective = 'soft-boundary'
        self.loss_type = 'distance'
        # Data-augmentation strengths (see the `augmentations` class).
        self.augmentation = augmentations()
class augmentations(object):
    """Data-augmentation strength settings referenced by `Config`."""
    def __init__(self):
        # Scaling strength — presumably a multiplicative factor; confirm
        # against the augmentation code that consumes it.
        self.scale_ratio = 1.1
        # Jitter (noise) strength applied to the series.
        self.jitter_ratio = 0.1
class Config(object):
    """Hyper-parameter configuration for the 'UCR' dataset.

    NOTE(review): this redefinition shadows the earlier `Config` class
    (for 'IOpsCompetition') at module level — confirm how the intended
    configuration is selected.
    """
    def __init__(self):
        self.dataset = 'UCR'
        # Encoder architecture (channels, convolution, recurrent sizes).
        self.input_channels = 1
        self.kernel_size = 8
        self.stride = 1
        self.final_out_channels = 64
        self.hidden_size = 128
        self.num_layers = 3
        self.project_channels = 32
        self.dropout = 0.45
        self.features_len = 18
        # Sliding-window parameters over the time series.
        self.window_size = 64
        self.time_step = 4
        # Training schedule.
        self.num_epoch = 50
        self.freeze_length_epoch = 10
        self.change_center_epoch = 10
        # Loss weighting and center update tolerance.
        self.center_eps = 0.1
        self.omega1 = 1
        self.omega2 = 0.1
        # Adam optimizer betas and learning rate.
        self.beta1 = 0.9
        self.beta2 = 0.99
        self.lr = 0.0003
        self.drop_last = False
        self.batch_size = 512
        # One-class objective parameters (nu: train quantile,
        # detect_nu: detection quantile) — presumably Deep SVDD-style;
        # confirm against the training code.
        self.nu = 0.01
        self.detect_nu = 0.0005
        self.threshold_determine = 'one-anomaly'
        self.objective = 'one-class'
        self.loss_type = 'distance'
        # Data-augmentation strengths (see the `augmentations` class).
        self.augmentation = augmentations()