def rmse_base(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, mask):
    # Masked pixels are overwritten with a dummy value of 1 so they cannot produce
    # inf/nan (mask_mean is expected to exclude them from the average anyway).
    depth_gt[mask] = 1
    depth_prediction[mask] = 1
    se = (depth_prediction - depth_gt) ** 2
    return torch.mean(torch.sqrt(mask_mean(se, mask, dim=[1, 2, 3])))


def rmse_log_base(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, mask):
    depth_gt[mask] = 1
    depth_prediction[mask] = 1
    sle = (torch.log(depth_prediction) - torch.log(depth_gt)) ** 2
    return torch.mean(torch.sqrt(mask_mean(sle, mask, dim=[1, 2, 3])))


def abs_rel_base(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, mask):
    return mask_mean(torch.abs(depth_prediction - depth_gt) / depth_gt, mask)


def sq_rel_base(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, mask):
    return mask_mean(((depth_prediction - depth_gt) ** 2) / depth_gt, mask)
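A brief usage sketch of the masked depth metrics above. The mask_mean helper is defined elsewhere in the repository; a plausible stand-in (mean over entries where the mask is False) is assumed here so the example is self-contained.

import torch

def mask_mean(t, mask, dim=None):
    # Assumed behaviour: average only over entries where mask is False (valid pixels).
    valid = (~mask).float()
    if dim is None:
        return (t * valid).sum() / valid.sum().clamp(min=1)
    return (t * valid).sum(dim=dim) / valid.sum(dim=dim).clamp(min=1)

pred = torch.rand(4, 1, 32, 32) + 0.5   # strictly positive predicted depth
gt = torch.rand(4, 1, 32, 32) + 0.5     # strictly positive ground-truth depth
mask = gt < 0.6                          # True marks pixels to ignore
print(abs_rel_base(pred, gt, mask))
print(rmse_base(pred.clone(), gt.clone(), mask))  # rmse_base/rmse_log_base write into their inputs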
class ConfigParser():
    def __init__(self, args, options='', timestamp=True):
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        args = args.parse_args()
        self.args = args
        if args.device:
            os.environ['CUDA_VISIBLE_DEVICES'] = args.device
        if args.resume is None:
            msg_no_cfg = "A configuration file needs to be specified. Add '-c config.json', for example."
            assert args.config is not None, msg_no_cfg
            self.cfg_fname = Path(args.config)
            config = read_json(self.cfg_fname)
            self.resume = None
        else:
            self.resume = Path(args.resume)
            resume_cfg_fname = self.resume.parent / 'config.json'
            config = read_json(resume_cfg_fname)
            if args.config is not None:
                config.update(read_json(Path(args.config)))
        self._config = _update_config(config, options, args)
        timestamp = datetime.now().strftime('%m%d_%H%M%S') if timestamp else ''
        if 'trainer' in self.config:
            save_dir = Path(self.config['trainer']['save_dir'])
            if 'timestamp_replacement' in self.config['trainer']:
                timestamp = self.config['trainer']['timestamp_replacement']
        elif 'evaluater' in self.config:
            save_dir = Path(self.config['evaluater']['save_dir'])
            if 'timestamp_replacement' in self.config['evaluater']:
                timestamp = self.config['evaluater']['timestamp_replacement']
        elif 'save_dir' in self.config:
            save_dir = Path(self.config['save_dir'])
        else:
            save_dir = Path('../saved')
        exper_name = self.config['name']
        self._save_dir = save_dir / 'models' / exper_name / timestamp
        self._log_dir = save_dir / 'log' / exper_name / timestamp
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        write_json(self.config, self.save_dir / 'config.json')
        setup_logging(self.log_dir)
        self.log_levels = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}

    def initialize(self, name, module, *args, **kwargs):
        """
        Finds a function handle with the name given as 'type' in config, and returns the
        instance initialized with the corresponding keyword args given as 'args'.
        """
        module_name = self[name]['type']
        module_args = dict(self[name]['args'])
        assert all(k not in module_args for k in kwargs), 'Overwriting kwargs given in config file is not allowed'
        module_args.update(kwargs)
        return getattr(module, module_name)(*args, **module_args)

    def initialize_list(self, name, module, *args, **kwargs):
        for to_init in self[name]:
            module_name = to_init['type']
            module_args = dict(to_init['args'])
            module_args.update(kwargs)
            yield getattr(module, module_name)(*args, **module_args)

    def __getitem__(self, name):
        return self.config[name]

    def get_logger(self, name, verbosity=2):
        msg_verbosity = 'verbosity option {} is invalid. Valid options are {}.'.format(verbosity, self.log_levels.keys())
        assert verbosity in self.log_levels, msg_verbosity
        logger = logging.getLogger(name)
        logger.setLevel(self.log_levels[verbosity])
        return logger

    @property
    def config(self):
        return self._config

    @property
    def save_dir(self):
        return self._save_dir

    @property
    def log_dir(self):
        return self._log_dir
def _update_config(config, options, args):
    for opt in options:
        value = getattr(args, _get_opt_name(opt.flags))
        if value is not None:
            _set_by_path(config, opt.target, value)
    return config


def _get_opt_name(flags):
    for flg in flags:
        if flg.startswith('--'):
            return flg.replace('--', '')
    return flags[0].replace('--', '')


def _set_by_path(tree, keys, value):
    """Set a value in a nested object in tree by sequence of keys."""
    _get_by_path(tree, keys[:-1])[keys[-1]] = value


def _get_by_path(tree, keys):
    """Access a nested object in tree by sequence of keys."""
    return reduce(getitem, keys, tree)
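The two path helpers above are the core of the command-line override mechanism. A minimal, self-contained demonstration (with the imports the snippet itself relies on spelled out) might look like this:

from functools import reduce
from operator import getitem

config = {'optimizer': {'args': {'lr': 1e-3}}}
# Equivalent to: config['optimizer']['args']['lr'] = 5e-4
_set_by_path(config, ['optimizer', 'args', 'lr'], 5e-4)
assert _get_by_path(config, ['optimizer', 'args', 'lr']) == 5e-4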
def main(config, options=[]):
    seed_rng(0)
    logger = config.get_logger('train')
    data_loader = config.initialize('data_loader', module_data)
    if 'val_data_loader' in config.config:
        valid_data_loader = config.initialize('val_data_loader', module_data)
    else:
        valid_data_loader = data_loader.split_validation()
    model = config.initialize('arch', module_arch)
    logger.info(model)
    logger.info(f'{sum(p.numel() for p in model.parameters() if p.requires_grad)} trainable parameters')
    logger.info(f'{sum(p.numel() for p in model.parameters())} total parameters')
    if 'loss_module' in config.config:
        loss = config.initialize('loss_module', module_loss)
    else:
        loss = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.initialize('optimizer', torch.optim, trainable_params)
    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler, optimizer)
    trainer = Trainer(model, loss, metrics, optimizer, config=config,
                      data_loader=data_loader, valid_data_loader=valid_data_loader,
                      lr_scheduler=lr_scheduler, options=options)
    trainer.train()
class Trainer(BaseTrainer):
    def __init__(self, model, loss, metrics, optimizer, config, data_loader,
                 valid_data_loader=None, lr_scheduler=None, options=[]):
        super().__init__(model, loss, metrics, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        len_epoch = config['trainer'].get('len_epoch', None)
        if len_epoch is None:
            self.len_epoch = len(self.data_loader)
        else:
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = config['trainer'].get('log_step', int(np.sqrt(data_loader.batch_size)))
        self.val_log_step = config['trainer'].get('val_step', 1)
        self.roi = config['trainer'].get('roi')
        self.roi_train = config['trainer'].get('roi_train', self.roi)
        self.alpha = config['trainer'].get('alpha', None)
        self.max_distance = config['trainer'].get('max_distance', None)
        self.val_avg = config['trainer'].get('val_avg', True)
        self.save_multiple = config['trainer'].get('save_multiple', False)
        self.invert_output_images = config['trainer'].get('invert_output_images', True)
        self.wrap_loss_in_module = config['trainer'].get('wrap_loss_in_module', False)
        self.value_faders = config['trainer'].get('value_faders', {})
        self.options = options
        if self.wrap_loss_in_module:
            self.loss = LossWrapper(loss_function=self.loss, roi=self.roi, options=self.options)
        if isinstance(loss, torch.nn.Module) or self.wrap_loss_in_module:
            self.module_loss = True
            self.loss.to(self.device)
            if len(self.device_ids) > 1:
                self.loss.num_devices = len(self.device_ids)
                self.model = torch.nn.DataParallel(torch.nn.Sequential(self.model.module, self.loss), self.device_ids)
            else:
                self.model = torch.nn.Sequential(self.model, self.loss)
        else:
            self.module_loss = False
        self.value_faders = {k: ValueFader(v[0], v[1]) for k, v in self.value_faders.items()}

    def _eval_metrics(self, data_dict, training=False):
        acc_metrics = np.zeros(len(self.metrics))
        for i, metric in enumerate(self.metrics):
            acc_metrics[i] += metric(data_dict, self.roi, self.max_distance)
            if (not self.val_avg) or training:
                self.writer.add_scalar('{}'.format(metric.__name__), acc_metrics[i])
        if np.any(np.isnan(acc_metrics)):
            acc_metrics = np.zeros(len(self.metrics))
            valid = np.zeros(len(self.metrics))
        else:
            valid = np.ones(len(self.metrics))
        return acc_metrics, valid

    def _train_epoch(self, epoch):
        self.model.train()
        total_loss = 0
        total_loss_dict = {}
        total_metrics = np.zeros(len(self.metrics))
        total_metrics_valid = np.zeros(len(self.metrics))
        fade_values = {k: torch.tensor([fader.get_value(epoch)]) for k, fader in self.value_faders.items()}
        for batch_idx, (data, target) in enumerate(self.data_loader):
            data.update(fade_values)
            data, target = to(data, self.device), to(target, self.device)
            data['target'] = target
            start_time = time.time()
            self.optimizer.zero_grad()
            if not self.module_loss:
                data = self.model(data)
                loss_dict = self.loss(data, self.alpha, self.roi_train, options=self.options)
            else:
                data, loss_dict = self.model(data)
                loss_dict = map_fn(loss_dict, torch.sum)
            loss = loss_dict['loss']
            if loss.requires_grad:
                loss.backward()
                self.optimizer.step()
            loss_dict = map_fn(loss_dict, torch.detach)
            self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
            self.writer.add_scalar('loss', loss.item())
            for loss_component, v in loss_dict.items():
                self.writer.add_scalar(f'loss_{loss_component}', v.item())
            total_loss += loss.item()
            total_loss_dict = operator_on_dict(total_loss_dict, loss_dict, operator.add)
            metrics, valid = self._eval_metrics(data, True)
            total_metrics += metrics
            total_metrics_valid += valid
            if self.writer.step % self.log_step == 0:
                img_count = min(data['keyframe'].shape[0], 8)
                self.logger.debug('Train Epoch: {} {} Loss: {:.6f} Loss_dict: {}'.format(
                    epoch, self._progress(batch_idx), loss.item(), loss_dict))
                if 'mask' in data:
                    if self.invert_output_images:
                        result = torch.clamp(1 / data['result'][:img_count], 0, 100).cpu()
                        result /= (torch.max(result) * 2) / 3
                    else:
                        result = data['result'][:img_count].cpu()
                    mask = data['mask'][:img_count].cpu()
                    img = torch.cat([result, mask], dim=2)
                elif self.invert_output_images:
                    img = torch.clamp(1 / data['result'][:img_count], 0, 100).cpu()
                else:
                    img = data['result'][:img_count].cpu()
                self.writer.add_image('input', make_grid(to(data['keyframe'][:img_count], 'cpu'), nrow=2, normalize=True))
                self.writer.add_image('output', make_grid(img, nrow=2, normalize=True))
                self.writer.add_image('ground_truth', make_grid(to(torch.clamp(infnan_to_zero(1 / data['target'][:img_count]), 0, 100), 'cpu'), nrow=2, normalize=True))
            if batch_idx == self.len_epoch:
                break
        log = {'loss': total_loss / self.len_epoch, 'metrics': (total_metrics / total_metrics_valid).tolist()}
        for loss_component, v in total_loss_dict.items():
            log[f'loss_{loss_component}'] = v.item() / self.len_epoch
        if self.do_validation:
            val_log = self._valid_epoch(epoch)
            log.update(val_log)
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
        return log

    def _valid_epoch(self, epoch):
        self.model.eval()
        total_val_loss = 0
        total_val_loss_dict = {}
        total_val_metrics = np.zeros(len(self.metrics))
        total_val_metrics_valid = np.zeros(len(self.metrics))
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(self.valid_data_loader):
                data, target = to(data, self.device), to(target, self.device)
                data['target'] = target
                if not self.module_loss:
                    data = self.model(data)
                    loss_dict = self.loss(data, self.alpha, self.roi_train, options=self.options)
                else:
                    data, loss_dict = self.model(data)
                    loss_dict = map_fn(loss_dict, torch.sum)
                loss = loss_dict['loss']
                img_count = min(data['keyframe'].shape[0], 8)
                self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')
                if not self.val_avg:
                    self.writer.add_scalar('loss', loss.item())
                    for loss_component, v in loss_dict.items():
                        self.writer.add_scalar(f'loss_{loss_component}', v.item())
                total_val_loss += loss.item()
                total_val_loss_dict = operator_on_dict(total_val_loss_dict, loss_dict, operator.add)
                metrics, valid = self._eval_metrics(data)
                total_val_metrics += metrics
                total_val_metrics_valid += valid
                if batch_idx % self.val_log_step == 0:
                    if 'mask' in data:
                        if self.invert_output_images:
                            result = torch.clamp(1 / data['result'][:img_count], 0, 100).cpu()
                            result /= (torch.max(result) * 2) / 3
                        else:
                            result = data['result'][:img_count].cpu()
                        mask = data['mask'][:img_count].cpu()
                        img = torch.cat([result, mask], dim=2)
                    elif self.invert_output_images:
                        img = torch.clamp(1 / data['result'][:img_count], 0, 100).cpu()
                    else:
                        img = data['result'][:img_count].cpu()
                    self.writer.add_image('input', make_grid(to(data['keyframe'][:img_count], 'cpu'), nrow=2, normalize=True))
                    self.writer.add_image('output', make_grid(img, nrow=2, normalize=True))
                    self.writer.add_image('ground_truth', make_grid(to(torch.clamp(infnan_to_zero(1 / data['target'][:img_count]), 0, 100), 'cpu'), nrow=2, normalize=True))
        if self.val_avg:
            len_val = len(self.valid_data_loader)
            self.writer.add_scalar('loss', total_val_loss / len_val)
            for i, metric in enumerate(self.metrics):
                self.writer.add_scalar('{}'.format(metric.__name__), total_val_metrics[i] / len_val)
            for loss_component, v in total_val_loss_dict.items():
                self.writer.add_scalar(f'loss_{loss_component}', v.item() / len_val)
        result = {'val_loss': total_val_loss / len(self.valid_data_loader),
                  'val_metrics': (total_val_metrics / total_val_metrics_valid).tolist()}
        for loss_component, v in total_val_loss_dict.items():
            result[f'val_loss_{loss_component}'] = v.item() / len(self.valid_data_loader)
        return result

    def _progress(self, batch_idx):
        base = '[{}/{} ({:.0f}%)]'
        if hasattr(self.data_loader, 'n_samples'):
            current = batch_idx * self.data_loader.batch_size
            total = self.data_loader.n_samples
        else:
            current = batch_idx
            total = self.len_epoch
        return base.format(current, total, 100.0 * current / total)
def to(data, device):
    # Recursively move tensors inside (possibly nested) dicts and lists to the given device.
    if isinstance(data, dict):
        return {k: to(data[k], device) for k in data.keys()}
    elif isinstance(data, list):
        return [to(v, device) for v in data]
    else:
        return data.to(device)


def infnan_to_zero(t: torch.Tensor):
    # Replace inf/nan entries with zero, in place.
    t[torch.isinf(t)] = 0
    t[torch.isnan(t)] = 0
    return t
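Quick sanity check of the two helpers above: to() recurses through dicts and lists, and infnan_to_zero() zeroes non-finite entries in place.

import torch

batch = {'keyframe': torch.zeros(2, 3), 'poses': [torch.ones(2)]}
batch = to(batch, 'cpu')  # a device string works the same way as a torch.device here

t = torch.tensor([1.0, float('inf'), float('nan')])
print(infnan_to_zero(t))  # tensor([1., 0., 0.])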
def get_inception_model(): return tfhub.load(INCEPTION_TFHUB)
def create_inception_graph(pth): 'Creates a graph from saved GraphDef file.' with tf.gfile.FastGFile(pth, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) _ = tf.import_graph_def(graph_def, name='FID_Inception_Net')
def _get_inception_layer(sess): 'Prepares inception net for batched usage and returns pool_3 layer. ' layername = 'FID_Inception_Net/pool_3:0' pool3 = sess.graph.get_tensor_by_name(layername) ops = pool3.graph.get_operations() for (op_idx, op) in enumerate(ops): for o in op.outputs: shape = o.get_shape() if (shape._dims != []): new_shape = [] for (j, s) in enumerate(shape): if ((s == 1) and (j == 0)): new_shape.append(None) else: new_shape.append(s) o.__dict__['_shape_val'] = tf.TensorShape(new_shape) return pool3
def get_activations(images, sess, batch_size=50, verbose=False): 'Calculates the activations of the pool_3 layer for all images.\n\n Params:\n -- images : Numpy array of dimension (n_images, hi, wi, 3). The values\n must lie between 0 and 256.\n -- sess : current session\n -- batch_size : the images numpy array is split into batches with batch size\n batch_size. A reasonable batch size depends on the disposable hardware.\n -- verbose : If set to True and parameter out_step is given, the number of calculated\n batches is reported.\n Returns:\n -- A numpy array of dimension (num images, 2048) that contains the\n activations of the given tensor when feeding inception with the query tensor.\n ' inception_layer = _get_inception_layer(sess) d0 = images.shape[0] if (batch_size > d0): print('warning: batch size is bigger than the data size. setting batch size to data size') batch_size = d0 n_batches = (d0 // batch_size) n_used_imgs = (n_batches * batch_size) pred_arr = np.empty((n_used_imgs, 2048)) for i in range(n_batches): if verbose: print(('\rPropagating batch %d/%d' % ((i + 1), n_batches)), end='', flush=True) start = (i * batch_size) end = (start + batch_size) batch = images[start:end] pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch}) pred_arr[start:end] = pred.reshape(batch_size, (- 1)) if verbose: print(' done') return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-06): "Numpy implementation of the Frechet Distance.\n The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)\n and X_2 ~ N(mu_2, C_2) is\n d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).\n\n Stable version by Dougal J. Sutherland.\n\n Params:\n -- mu1 : Numpy array containing the activations of the pool_3 layer of the\n inception net ( like returned by the function 'get_predictions')\n for generated samples.\n -- mu2 : The sample mean over activations of the pool_3 layer, precalcualted\n on an representive data set.\n -- sigma1: The covariance matrix over activations of the pool_3 layer for\n generated samples.\n -- sigma2: The covariance matrix over activations of the pool_3 layer,\n precalcualted on an representive data set.\n\n Returns:\n -- : The Frechet Distance.\n " mu1 = np.atleast_1d(mu1) mu2 = np.atleast_1d(mu2) sigma1 = np.atleast_2d(sigma1) sigma2 = np.atleast_2d(sigma2) assert (mu1.shape == mu2.shape), 'Training and test mean vectors have different lengths' assert (sigma1.shape == sigma2.shape), 'Training and test covariances have different dimensions' diff = (mu1 - mu2) (covmean, _) = linalg.sqrtm(sigma1.dot(sigma2), disp=False) if (not np.isfinite(covmean).all()): msg = ('fid calculation produces singular product; adding %s to diagonal of cov estimates' % eps) print(('warn:' + msg)) offset = (np.eye(sigma1.shape[0]) * eps) covmean = linalg.sqrtm((sigma1 + offset).dot((sigma2 + offset))) if np.iscomplexobj(covmean): if (not np.allclose(np.diagonal(covmean).imag, 0, atol=0.001)): m = np.max(np.abs(covmean.imag)) raise ValueError('Imaginary component {}'.format(m)) covmean = covmean.real tr_covmean = np.trace(covmean) return (((diff.dot(diff) + np.trace(sigma1)) + np.trace(sigma2)) - (2 * tr_covmean))
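As a sanity check of the formula in the docstring, d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)): with identical identity covariances the trace term vanishes, so the distance reduces to the squared mean difference. This assumes scipy is importable, since the function relies on scipy.linalg.sqrtm.

import numpy as np

mu1, mu2 = np.zeros(4), np.full(4, 2.0)
sigma = np.eye(4)
d2 = calculate_frechet_distance(mu1, sigma, mu2, sigma)
print(np.isclose(d2, np.sum((mu1 - mu2) ** 2)))  # True: the distance equals 16.0 here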
def calculate_activation_statistics(images, sess, batch_size=50, verbose=False): 'Calculation of the statistics used by the FID.\n Params:\n -- images : Numpy array of dimension (n_images, hi, wi, 3). The values\n must lie between 0 and 255.\n -- sess : current session\n -- batch_size : the images numpy array is split into batches with batch size\n batch_size. A reasonable batch size depends on the available hardware.\n -- verbose : If set to True and parameter out_step is given, the number of calculated\n batches is reported.\n Returns:\n -- mu : The mean over samples of the activations of the pool_3 layer of\n the incption model.\n -- sigma : The covariance matrix of the activations of the pool_3 layer of\n the incption model.\n ' act = get_activations(images, sess, batch_size, verbose) mu = np.mean(act, axis=0) sigma = np.cov(act, rowvar=False) return (mu, sigma)
def check_or_download_inception(inception_path): ' Checks if the path to the inception file is valid, or downloads\n the file if it is not present. ' INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' if (inception_path is None): inception_path = '/tmp' download_file = (inception_path + '/classify_image_graph_def.tar') model_file = (inception_path + '/classify_image_graph_def.pb') if (not os.path.exists(model_file)): print('Downloading Inception model') download_path = get_file(download_file, origin=INCEPTION_URL) with tf.gfile.Open(download_path, mode='rb') as gf: with tarfile.open(fileobj=gf, mode='r') as f: f.extract('classify_image_graph_def.pb', inception_path) return str(model_file)
def fid_score(create_session, data, samples, path='/tmp', cpu_only=False): with create_session() as sess: if cpu_only: with tf.device('cpu'): inception_path = check_or_download_inception(path) create_inception_graph(str(inception_path)) data = data samples = samples (m1, s1) = calculate_activation_statistics(data, sess) (m2, s2) = calculate_activation_statistics(samples, sess) fid_value = calculate_frechet_distance(m1, s1, m2, s2) return fid_value else: inception_path = check_or_download_inception(path) create_inception_graph(str(inception_path)) data = data samples = samples (m1, s1) = calculate_activation_statistics(data, sess) (m2, s2) = calculate_activation_statistics(samples, sess) fid_value = calculate_frechet_distance(m1, s1, m2, s2) return fid_value
def load_dataset_stats(config): 'Load the pre-computed dataset statistics.' filename = 'statistics/statistics_{}.npz'.format(config.problem) with tf2.io.gfile.GFile(filename, 'rb') as fin: stats = np.load(fin) return stats
def classifier_fn_from_tfhub(tfhub_module, output_fields, inception_model, return_tensor=False): 'Returns a function that can be as a classifier function.\n\n Copied from tfgan but avoid loading the model each time calling _classifier_fn\n\n Wrapping the TF-Hub module in another function defers loading the module until\n use, which is useful for mocking and not computing heavy default arguments.\n\n Args:\n tfhub_module: A string handle for a TF-Hub module.\n output_fields: A string, list, or `None`. If present, assume the module\n outputs a dictionary, and select this field.\n inception_model: A model loaded from TFHub.\n return_tensor: If `True`, return a single tensor instead of a dictionary.\n\n Returns:\n A one-argument function that takes an image Tensor and returns outputs.\n ' if isinstance(output_fields, six.string_types): output_fields = [output_fields] def _classifier_fn(images): output = inception_model(images) if (output_fields is not None): output = {x: output[x] for x in output_fields} if return_tensor: assert (len(output) == 1) output = list(output.values())[0] return tf2.nest.map_structure(tf.compat.v1.layers.flatten, output) return _classifier_fn
@tf2.function def run_inception_jit(inputs, inception_model, num_batches=1): 'Running the inception network. Assuming input is within [0, 255].' inputs = ((tf2.cast(inputs, tf2.float32) - 127.5) / 127.5) return tfgan.eval.run_classifier_fn(inputs, num_batches=num_batches, classifier_fn=classifier_fn_from_tfhub(INCEPTION_TFHUB, None, inception_model), dtypes=_DEFAULT_DTYPES)
@tf2.function def run_inception_distributed(input_tensor, inception_model, num_batches=1): 'Distribute the inception network computation to all available TPUs.\n\n Assuming the input is within [0, 255].\n ' (num_tpus, device_type) = num_device() input_tensors = tf2.split(input_tensor, num_tpus, axis=0) pool3 = [] logits = [] for (i, tensor) in enumerate(input_tensors): with tf2.device('/{}:{}'.format(device_type, i)): tensor_on_device = tf2.identity(tensor) res = run_inception_jit(tensor_on_device, inception_model, num_batches=num_batches) pool3.append(res['pool_3']) logits.append(res['logits']) with tf2.device('/CPU'): return {'pool_3': tf2.concat(pool3, axis=0), 'logits': tf2.concat(logits, axis=0)}
def compute_fid(x_data, x_samples): assert (type(x_data) == np.ndarray) assert (type(x_samples) == np.ndarray) assert (np.min(x_data) > (0.0 - 0.0001)) assert (np.max(x_data) < (255.0 + 0.0001)) assert (np.mean(x_data) > 10.0) assert (np.min(x_samples) > (0.0 - 0.0001)) assert (np.max(x_samples) < (255.0 + 0.0001)) assert (np.mean(x_samples) > 10.0) def create_session(): return tf.Session() path = '/tmp' fid = fid_score(create_session, x_data, x_samples, path) return fid
def main(argv): del argv LARGE_DATASETS = ['celebahq128', 'lsun_bedroom128', 'lsun_bedroom64', 'lsun_church128', 'lsun_church64', 'celeba'] exp_id = pygrid.get_exp_id(__file__) output_dir = pygrid.get_output_dir(exp_id, './') if (FLAGS.problem in LARGE_DATASETS): FLAGS.fid_n_samples = 2560 FLAGS.fid_n_batch = 640 elif (FLAGS.problem == 'celebahq256'): FLAGS.fid_n_samples = 1280 FLAGS.fid_n_batch = 160 hps = AttrDict(get_flag_dict()) hps.output = output_dir if hps.device: set_gpu(hps.device) init_tf2(tf_eager=hps.eager, tf_memory_growth=True) if hps.tpu: resolver = setup_tpu() strategy = tf.distribute.experimental.TPUStrategy(resolver) model = Trainer_dist(hps=hps) else: strategy = None model = Trainer(hps=hps) set_seed(hps.rnd_seed) model.train(output_dir, output_dir, output_dir, strategy)
def get_beta_schedule(*, beta_start, beta_end, num_diffusion_timesteps):
    betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
    betas = np.append(betas, 1.0)
    assert betas.shape == (num_diffusion_timesteps + 1,)
    return betas


def get_sigma_schedule(*, beta_start, beta_end, num_diffusion_timesteps):
    """
    Get the noise level schedule
    :param beta_start: begin noise level
    :param beta_end: end noise level
    :param num_diffusion_timesteps: number of timesteps
    :return:
        -- sigmas: sigma_{t+1}, scaling parameter of epsilon_{t+1}
        -- a_s: sqrt(1 - sigma_{t+1}^2), scaling parameter of x_t
    """
    betas = np.linspace(beta_start, beta_end, 1000, dtype=np.float64)
    betas = np.append(betas, 1.0)
    assert isinstance(betas, np.ndarray)
    betas = betas.astype(np.float64)
    assert (betas > 0).all() and (betas <= 1).all()
    sqrt_alphas = np.sqrt(1.0 - betas)
    idx = tf.cast(np.concatenate([np.arange(num_diffusion_timesteps) * (1000 // ((num_diffusion_timesteps - 1) * 2)), [999]]), dtype=tf.int32)
    a_s = np.concatenate([[np.prod(sqrt_alphas[:idx[0] + 1])],
                          np.asarray([np.prod(sqrt_alphas[idx[i - 1] + 1:idx[i] + 1]) for i in np.arange(1, len(idx))])])
    sigmas = np.sqrt(1 - a_s ** 2)
    return sigmas, a_s
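A small numerical check of get_sigma_schedule (assuming TensorFlow 2.x in eager mode is available, since the index computation uses tf.cast): every (a_s, sigma) pair satisfies a^2 + sigma^2 = 1 by construction, and the cumulative product of a_s is the total signal scaling used downstream by q_sample.

import numpy as np

sigmas, a_s = get_sigma_schedule(beta_start=1e-4, beta_end=0.02, num_diffusion_timesteps=6)
print(sigmas.shape, a_s.shape)                   # (7,) (7,): one level per timestep plus the final one
print(np.allclose(a_s ** 2 + sigmas ** 2, 1.0))  # True
a_s_cum = np.cumprod(a_s)
print(np.all(np.diff(a_s_cum) < 0))              # signal scale decays monotonically with t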
class RecoveryLikelihood(tf.keras.Model): def __init__(self, hps): super(RecoveryLikelihood, self).__init__() self.hps = hps self.num_timesteps = FLAGS.num_diffusion_timesteps (self.sigmas, self.a_s) = get_sigma_schedule(beta_start=0.0001, beta_end=0.02, num_diffusion_timesteps=self.num_timesteps) self.a_s_cum = np.cumprod(self.a_s) self.sigmas_cum = np.sqrt((1 - (self.a_s_cum ** 2))) self.a_s_prev = self.a_s.copy() self.a_s_prev[(- 1)] = 1 self.is_recovery = np.ones((self.num_timesteps + 1), dtype=np.float32) self.is_recovery[(- 1)] = 0 if (self.hps.img_sz == 32): ch_mult = (1, 2, 2, 2) elif (self.hps.img_sz == 128): ch_mult = (1, 2, 2, 2, 4, 4) elif (self.hps.img_sz == 64): ch_mult = (1, 2, 2, 2, 4) elif (self.hps.img_sz == 256): ch_mult = (1, 1, 2, 2, 2, 4, 4) else: raise NotImplementedError self.net = net_res_temb2(name='net', ch=128, ch_mult=ch_mult, num_res_blocks=FLAGS.num_res_blocks, attn_resolutions=(16,)) def init(self, x_shape): '\n Initialization function to activate model weights.\n :param x_shape: input date shape\n ' x = tf.random.uniform(x_shape, minval=(- 0.5), maxval=0.5) self.net(x, 0, dropout=0.0) @staticmethod def _extract(a, t, x_shape): '\n Extract some coefficients at specified timesteps,\n then reshape to [batch_size, 1, 1, 1, 1, ...] for broadcasting purposes.\n ' if (isinstance(t, int) or (len(t.shape) == 0)): t = (tf.ones(x_shape[0], dtype=tf.int32) * t) (bs,) = t.shape assert (x_shape[0] == bs) out = tf.gather(tf.convert_to_tensor(a, dtype=tf.float32), t) assert (out.shape == [bs]) return tf.reshape(out, ([bs] + ((len(x_shape) - 1) * [1]))) def q_sample(self, x_start, t, *, noise=None): '\n Diffuse the data (t == 0 means diffused for 1 step)\n ' if (noise is None): noise = tf.random.normal(shape=x_start.shape) assert (noise.shape == x_start.shape) x_t = ((self._extract(self.a_s_cum, t, x_start.shape) * x_start) + (self._extract(self.sigmas_cum, t, x_start.shape) * noise)) return x_t def q_sample_pairs(self, x_start, t): '\n Generate a pair of disturbed images for training\n :param x_start: x_0\n :param t: time step t\n :return: x_t, x_{t+1}\n ' noise = tf.random.normal(shape=x_start.shape) x_t = self.q_sample(x_start, t) x_t_plus_one = ((self._extract(self.a_s, (t + 1), x_start.shape) * x_t) + (self._extract(self.sigmas, (t + 1), x_start.shape) * noise)) return (x_t, x_t_plus_one) def q_sample_progressive(self, x_0): '\n Generate a full sequence of disturbed images\n ' x_preds = [] for t in range((self.num_timesteps + 1)): t_now = (tf.ones([x_0.shape[0]], dtype=tf.int32) * t) x = self.q_sample(x_0, t_now) x_preds.append(x) x_preds = tf.stack(x_preds, axis=0) return x_preds def training_losses(self, x_pos, x_neg, t, *, dropout=0.0): '\n Training loss calculation\n ' a_s = self._extract(self.a_s_prev, (t + 1), x_pos.shape) y_pos = (a_s * x_pos) y_neg = (a_s * x_neg) pos_f = self.net(y_pos, t, dropout=dropout) neg_f = self.net(y_neg, t, dropout=dropout) loss = (- (pos_f - neg_f)) loss_scale = (1.0 / (tf.cast(tf.gather(self.sigmas, (t + 1)), tf.float32) / self.sigmas[1])) loss = (loss_scale * loss) loss_ts = tf.math.unsorted_segment_mean(tf.abs(loss), t, self.num_timesteps) f_ts = tf.math.unsorted_segment_mean(tf.abs(pos_f), t, self.num_timesteps) return (tf.nn.compute_average_loss(loss, global_batch_size=self.hps.n_batch_train), loss_ts, f_ts) def log_prob(self, y, t, tilde_x, b0, sigma, is_recovery, *, dropout): return ((self.net(y, t, dropout=dropout) / tf.reshape(b0, [(- 1)])) - tf.reduce_sum((((((y - tilde_x) ** 2) / 2) / (sigma ** 2)) * is_recovery), axis=[1, 
2, 3])) def grad_f(self, y, t, tilde_x, b0, sigma, is_recovery, *, dropout): with tf.GradientTape() as tape: tape.watch(y) log_p_y = self.log_prob(y, t, tilde_x, b0, sigma, is_recovery, dropout=dropout) grad_y = tape.gradient(log_p_y, y) return (grad_y, log_p_y) def p_sample_langevin(self, tilde_x, t, *, dropout): '\n Langevin sampling function\n ' sigma = self._extract(self.sigmas, (t + 1), tilde_x.shape) sigma_cum = self._extract(self.sigmas_cum, t, tilde_x.shape) is_recovery = self._extract(self.is_recovery, (t + 1), tilde_x.shape) a_s = self._extract(self.a_s_prev, (t + 1), tilde_x.shape) c_t_square = (sigma_cum / self.sigmas_cum[0]) step_size_square = ((c_t_square * self.hps.mcmc_step_size_b_square) * (sigma ** 2)) y = tf.identity(tilde_x) is_accepted_summary = tf.zeros(y.shape[0], dtype=tf.float32) (grad_y, log_p_y) = self.grad_f(y, t, tilde_x, step_size_square, sigma, is_recovery, dropout=dropout) for _ in tf.range(tf.convert_to_tensor(self.hps.mcmc_num_steps)): noise = tf.random.normal(y.shape) y_new = ((y + ((0.5 * step_size_square) * grad_y)) + ((tf.sqrt(step_size_square) * noise) * FLAGS.noise_scale)) (grad_y_new, log_p_y_new) = self.grad_f(y_new, t, tilde_x, step_size_square, sigma, is_recovery, dropout=dropout) (y, grad_y, log_p_y) = (y_new, grad_y_new, log_p_y_new) is_accepted_summary = (is_accepted_summary / tf.convert_to_tensor(self.hps.mcmc_num_steps, dtype=tf.float32)) is_accepted_summary = tf.reduce_mean(is_accepted_summary) x = (y / a_s) disp = tf.math.unsorted_segment_mean(tf.norm((tf.reshape(x, [x.shape[0], (- 1)]) - tf.reshape(tilde_x, [tilde_x.shape[0], (- 1)])), axis=1), t, self.num_timesteps) return (x, disp, is_accepted_summary) @tf.function def p_sample_progressive(self, noise): '\n Sample a sequence of images with the sequence of noise levels\n ' num = noise.shape[0] x_neg_t = noise x_neg = tf.zeros([self.hps.num_diffusion_timesteps, num, self.hps.img_sz, self.hps.img_sz, 3], dtype=tf.float32) x_neg = tf.concat([x_neg, tf.expand_dims(noise, axis=0)], axis=0) is_accepted_summary = tf.constant(0.0) for t in tf.range((self.hps.num_diffusion_timesteps - 1), (- 1), (- 1)): (x_neg_t, _, is_accepted) = self.p_sample_langevin(x_neg_t, t, dropout=0.0) is_accepted_summary = (is_accepted_summary + is_accepted) x_neg_t = tf.reshape(x_neg_t, [num, self.hps.img_sz, self.hps.img_sz, 3]) insert_mask = tf.equal(t, tf.range((self.hps.num_diffusion_timesteps + 1), dtype=tf.int32)) insert_mask = tf.reshape(tf.cast(insert_mask, dtype=tf.float32), [(- 1), *([1] * len(noise.shape))]) x_neg = ((insert_mask * tf.expand_dims(x_neg_t, axis=0)) + ((1.0 - insert_mask) * x_neg)) is_accepted_summary = (is_accepted_summary / tf.convert_to_tensor(self.hps.num_diffusion_timesteps, dtype=tf.float32)) return (x_neg, is_accepted_summary) def p_sample_progressive_inner(self, noise): '\n Sample a sequence of images with the sequence of noise levels, without tf.function decoration\n ' num = noise.shape[0] x_neg_t = noise x_neg = tf.zeros([self.hps.num_diffusion_timesteps, num, self.hps.img_sz, self.hps.img_sz, 3], dtype=tf.float32) x_neg = tf.concat([x_neg, tf.expand_dims(noise, axis=0)], axis=0) is_accepted_summary = tf.constant(0.0) for t in tf.range((self.hps.num_diffusion_timesteps - 1), (- 1), (- 1)): (x_neg_t, _, is_accepted) = self.p_sample_langevin(x_neg_t, t, dropout=0.0) is_accepted_summary = (is_accepted_summary + is_accepted) x_neg_t = tf.reshape(x_neg_t, [num, self.hps.img_sz, self.hps.img_sz, 3]) insert_mask = tf.equal(t, tf.range((self.hps.num_diffusion_timesteps + 1), dtype=tf.int32)) 
insert_mask = tf.reshape(tf.cast(insert_mask, dtype=tf.float32), [(- 1), *([1] * len(noise.shape))]) x_neg = ((insert_mask * tf.expand_dims(x_neg_t, axis=0)) + ((1.0 - insert_mask) * x_neg)) is_accepted_summary = (is_accepted_summary / tf.convert_to_tensor(self.hps.num_diffusion_timesteps, dtype=tf.float32)) return (x_neg, is_accepted_summary) @tf.function def distribute_p_sample_progressive(self, noise, strategy): '\n Multi-device distributed version of p_sample_progressive\n ' (samples, is_accepted) = strategy.run(self.p_sample_progressive_inner, args=(noise,)) samples = tf.concat(samples.values, axis=1) is_accepted = strategy.reduce(tf.distribute.ReduceOp.MEAN, is_accepted, axis=None) return (samples, is_accepted)
def init_mp(tf2=True):
    if tf2:
        multiprocessing.set_start_method('spawn')


def copy_source(file, output_dir):
    with tf.io.gfile.GFile(os.path.join(output_dir, os.path.basename(file)), mode='wb') as f:
        with tf.io.gfile.GFile(file, mode='rb') as f0:
            shutil.copyfileobj(f0, f)
class FileHandler(StreamHandler): '\n A handler class which writes formatted logging records to disk files.\n ' def __init__(self, filename, mode='a', encoding=None, delay=False): '\n Open the specified file and use it as the stream for logging.\n ' self.baseFilename = os.path.abspath(filename) self.mode = mode self.encoding = encoding self.delay = delay if delay: Handler.__init__(self) self.stream = None else: StreamHandler.__init__(self, self._open()) with tf.io.gfile.GFile(self.baseFilename, 'w') as f: f.write('Logging ........\n') def close(self): '\n Closes the stream.\n ' self.acquire() try: try: if self.stream: try: self.flush() finally: stream = self.stream self.stream = None if hasattr(stream, 'close'): stream.close() finally: StreamHandler.close(self) finally: self.release() def _open(self): '\n Open the current base file with the (original) mode and encoding.\n Return the resulting stream.\n ' return tf.io.gfile.GFile(self.baseFilename, self.mode) def emit(self, record): "\n Emit a record.\n\n If the stream was not opened because 'delay' was specified in the\n constructor, open it before calling the superclass's emit.\n " if (self.stream is None): self.stream = self._open() StreamHandler.emit(self, record) def __repr__(self): level = getLevelName(self.level) return ('<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level))
def setup_logging_file(name, f, console=True): log_format = logging.Formatter('%(asctime)s : %(message)s') logger = logging.getLogger(name) logger.handlers = [] file_handler = FileHandler(f) file_handler.setFormatter(log_format) logger.addHandler(file_handler) if console: console_handler = logging.StreamHandler(sys.stdout) console_handler.setFormatter(log_format) logger.addHandler(console_handler) logger.setLevel(logging.INFO) return logger
def setup_logging(name, output_dir, console=True): log_format = logging.Formatter('%(asctime)s : %(message)s') logger = logging.getLogger(name) logger.handlers = [] output_file = os.path.join(output_dir, 'output.log') file_handler = FileHandler(output_file) file_handler.setFormatter(log_format) logger.addHandler(file_handler) if console: console_handler = logging.StreamHandler(sys.stdout) console_handler.setFormatter(log_format) logger.addHandler(console_handler) logger.setLevel(logging.INFO) return logger
def get_argv(): argv = sys.argv for i in range(1, len(argv)): if (argv[i] == '--ckpt_load'): argv.pop(i) argv.pop(i) break for i in range(1, len(argv)): if argv[i].startswith('--ckpt_load='): argv.pop(i) break for i in range(1, len(argv)): if (argv[i] == '--device'): argv.pop(i) argv.pop(i) break for i in range(1, len(argv)): if argv[i].startswith('--device='): argv.pop(i) break return ''.join(argv[1:])
def get_output_filename(file): file_name = get_exp_id(file) if (len(sys.argv) > 1): file_name = (file_name + get_argv()) return file_name
def get_output_dir(exp_id, rootdir): t = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') output_dir = os.path.join(rootdir, ('output/' + exp_id), t) if (len(sys.argv) > 1): output_dir = (output_dir + get_argv()) if (not os.path.exists(output_dir)): os.makedirs(output_dir) return output_dir
def fill_queue(device_ids): [free_devices.put_nowait(device_id) for device_id in device_ids]
def allocate_device(): try: free_devices_lock.acquire() return free_devices.get() finally: free_devices_lock.release()
def free_device(device): try: free_devices_lock.acquire() return free_devices.put_nowait(device) finally: free_devices_lock.release()
def update_job_status(job_id, job_status, read_opts, write_opts): try: job_file_lock.acquire() opts = read_opts() opt = next((opt for opt in opts if (opt['job_id'] == job_id))) opt['status'] = job_status write_opts(opts) except Exception: logging.exception('exception in update_job_status()') finally: job_file_lock.release()
def update_job_result_file(update_job_result, job_opt, job_stats, read_opts, write_opts): try: job_file_lock.acquire() opts = read_opts() target_opt = next((opt for opt in opts if (opt['job_id'] == job_opt['job_id']))) update_job_result(target_opt, job_stats) write_opts(opts) finally: job_file_lock.release()
def run_job(logger, opt, output_dir, output_dir_ckpt, train): device_id = allocate_device() opt_override = {'device': device_id} def merge(a, b): d = {} d.update(a) d.update(b) return d opt = merge(opt, opt_override) logger.info('new job: job_id={}, device_id={}'.format(opt['job_id'], opt['device'])) try: logger.info('spawning process: job_id={}, device_id={}'.format(opt['job_id'], opt['device'])) try: output_dir_thread = os.path.join(output_dir, str(opt['job_id'])) os.makedirs(output_dir_thread, exist_ok=True) output_dir_thread_ckpt = os.path.join(output_dir_ckpt, str(opt['job_id'])) os.makedirs(output_dir_thread_ckpt, exist_ok=True) run_job_lock.acquire() manager = multiprocessing.Manager() return_dict = manager.dict() p = multiprocessing.Process(target=train, args=(opt, output_dir, output_dir_thread, output_dir_thread_ckpt, return_dict)) p.start() finally: run_job_lock.release() p.join() logger.info('finished process: job_id={}, device_id={}'.format(opt['job_id'], opt['device'])) return return_dict['stats'] finally: free_device(device_id)
def run_jobs(logger, exp_id, output_dir, output_dir_ckpt, workers, train_job, read_opts, write_opts, update_job_result): opt_list = read_opts() opt_open = [opt for opt in opt_list if (opt['status'] == 'open')] logger.info('scheduling {} open of {} total jobs'.format(len(opt_open), len(opt_list))) logger.info('starting thread pool with {} workers'.format(workers)) with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor: def adjust_opt(opt): opt_override = {'exp_id': '{}_{}'.format(exp_id, opt['job_id'])} def merge(a, b): d = {} d.update(a) d.update(b) return d return merge(opt, opt_override) def do_run_job(opt): update_job_status(opt['job_id'], 'running', read_opts, write_opts) return run_job(logger, adjust_opt(opt), output_dir, output_dir_ckpt, train_job) futures = {executor.submit(do_run_job, opt): opt for opt in opt_open} for future in concurrent.futures.as_completed(futures): opt = futures[future] try: stats = future.result() logger.info('finished job future: job_id={}'.format(opt['job_id'])) update_job_result_file(update_job_result, opt, stats, read_opts, write_opts) update_job_status(opt['job_id'], 'finished', read_opts, write_opts) except Exception: logger.exception('exception in run_jobs()') update_job_status(opt['job_id'], 'fail', read_opts, write_opts)
def is_int(value):
    try:
        int(value)
        return True
    except ValueError:
        return False


def is_float(value):
    try:
        float(value)
        return not is_int(value)
    except ValueError:
        return False


def is_bool(value):
    return value.upper() in ['TRUE', 'FALSE']


def is_array(value):
    return '[' in value


def cast_str(value):
    if is_int(value):
        return int(value)
    if is_float(value):
        return float(value)
    if is_bool(value):
        return value.upper() == 'TRUE'
    if is_array(value):
        return eval(value)
    return value
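Example of the string-casting helpers above, which turn option values stored as CSV strings back into Python types:

print(cast_str('42'), cast_str('0.5'), cast_str('true'), cast_str('[1, 2]'), cast_str('adam'))
# -> 42 0.5 True [1, 2] adam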
def get_exp_id(file): return os.path.splitext(os.path.basename(file))[0]
def overwrite_opt(opt, opt_override):
    for k, v in opt_override.items():
        setattr(opt, k, v)
    return opt


def write_opts(opt_list, f):
    writer = csv.writer(f(), delimiter=',')
    header = [key for key in opt_list[0]]
    writer.writerow(header)
    for opt in opt_list:
        writer.writerow([opt[k] for k in header])


def read_opts(f):
    opt_list = []
    reader = csv.reader(f(), delimiter=',')
    header = next(reader)
    for values in reader:
        opt = {}
        for i, field in enumerate(header):
            opt[field] = cast_str(values[i])
        opt_list += [opt]
    return opt_list


def reset_job_status(opts_list):
    for opt in opts_list:
        if opt['status'] == 'running':
            opt['status'] = 'open'
    return opts_list
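Round-trip sketch for the CSV option helpers: both take a zero-argument callable returning a file-like object, so an in-memory buffer is enough for a quick test.

import io

opts = [{'job_id': 0, 'lr': 0.001, 'status': 'open'},
        {'job_id': 1, 'lr': 0.0005, 'status': 'open'}]
buf = io.StringIO()
write_opts(opts, lambda: buf)
buf.seek(0)
print(read_opts(lambda: buf))  # values come back through cast_str, e.g. 0.001 as a float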
class AStar(): def __init__(self, neighbor_func, dist_func='euclidian', heuristic_func='euclidian', bias=0.0, silent=True): self.neighbor_func = neighbor_func self.heuristic_func = heuristic_func self.dist_func = dist_func if (heuristic_func == 'euclidian'): self.heuristic_func = (lambda x, y: np.sqrt(np.sum(((x - y) ** 2)))) if (dist_func == 'euclidian'): self.dist_func = (lambda x, y: np.sqrt(np.sum(((x - y) ** 2)))) self.bias = bias self.silent = silent def reconstruct_path(self, came_from, current): total_path = [current] while (current in came_from): current = came_from[current] total_path.append(current) total_path.reverse() return total_path def __call__(self, current, goal): neighbor_func = self.neighbor_func heuristic_func = self.heuristic_func dist_func = self.dist_func bias = self.bias silent = self.silent closedset = set([]) openset = set([current]) openheap = [(0, current)] came_from = {} g_score = {current: 0} i = 0 while len(openset): current = heapq.heappop(openheap)[1] if (current == goal): self.closedset = closedset return self.reconstruct_path(came_from, goal) if ((not silent) and ((i % 100000) == 0)): print('[AStar] current: ', current) openset.remove(current) closedset.add(current) for neighbor in neighbor_func(current): if (neighbor in closedset): continue tentative_g_score = (g_score[current] + dist_func(current, neighbor)) if ((neighbor not in openset) or (tentative_g_score < g_score[neighbor])): came_from[neighbor] = current g_score[neighbor] = tentative_g_score if (neighbor not in openset): score = (tentative_g_score + ((1 + bias) * heuristic_func(neighbor, goal))) openset.add(neighbor) heapq.heappush(openheap, (score, neighbor)) i += 1 raise Exception('Goal State Not Found')
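A minimal usage sketch of the AStar class above on a 2-D integer grid. The built-in 'euclidian' option assumes array-like states, so this example passes hashable tuples together with explicit Manhattan distance and heuristic callables (both hypothetical choices, not part of the original code).

def neighbors(p):
    x, y = p
    return [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]

manhattan = lambda a, b: abs(a[0] - b[0]) + abs(a[1] - b[1])
astar = AStar(neighbors, dist_func=manhattan, heuristic_func=manhattan)
path = astar((0, 0), (2, 3))
print(len(path))  # 6 nodes, including start and goal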
class Pivots(): '\n Pivots is an ndarray of angular rotations\n\n This wrapper provides some functions for\n working with pivots.\n\n These are particularly useful as a number \n of atomic operations (such as adding or \n subtracting) cannot be achieved using\n the standard arithmatic and need to be\n defined differently to work correctly\n ' def __init__(self, ps): self.ps = np.array(ps) def __str__(self): return (('Pivots(' + str(self.ps)) + ')') def __repr__(self): return (('Pivots(' + repr(self.ps)) + ')') def __add__(self, other): return Pivots(np.arctan2(np.sin((self.ps + other.ps)), np.cos((self.ps + other.ps)))) def __sub__(self, other): return Pivots(np.arctan2(np.sin((self.ps - other.ps)), np.cos((self.ps - other.ps)))) def __mul__(self, other): return Pivots((self.ps * other.ps)) def __div__(self, other): return Pivots((self.ps / other.ps)) def __mod__(self, other): return Pivots((self.ps % other.ps)) def __pow__(self, other): return Pivots((self.ps ** other.ps)) def __lt__(self, other): return (self.ps < other.ps) def __le__(self, other): return (self.ps <= other.ps) def __eq__(self, other): return (self.ps == other.ps) def __ne__(self, other): return (self.ps != other.ps) def __ge__(self, other): return (self.ps >= other.ps) def __gt__(self, other): return (self.ps > other.ps) def __abs__(self): return Pivots(abs(self.ps)) def __neg__(self): return Pivots((- self.ps)) def __iter__(self): return iter(self.ps) def __len__(self): return len(self.ps) def __getitem__(self, k): return Pivots(self.ps[k]) def __setitem__(self, k, v): self.ps[k] = v.ps def _ellipsis(self): return tuple(map((lambda x: slice(None)), self.shape)) def quaternions(self, plane='xz'): fa = self._ellipsis() axises = np.ones((self.ps.shape + (3,))) axises[(fa + ('xyz'.index(plane[0]),))] = 0.0 axises[(fa + ('xyz'.index(plane[1]),))] = 0.0 return Quaternions.from_angle_axis(self.ps, axises) def directions(self, plane='xz'): dirs = np.zeros((len(self.ps), 3)) dirs['xyz'.index(plane[0])] = np.sin(self.ps) dirs['xyz'.index(plane[1])] = np.cos(self.ps) return dirs def normalized(self): xs = np.copy(self.ps) while np.any((xs > np.pi)): xs[(xs > np.pi)] = (xs[(xs > np.pi)] - (2 * np.pi)) while np.any((xs < (- np.pi))): xs[(xs < (- np.pi))] = (xs[(xs < (- np.pi))] + (2 * np.pi)) return Pivots(xs) def interpolate(self, ws): dir = np.average(self.directions, weights=ws, axis=0) return np.arctan2(dir[2], dir[0]) def copy(self): return Pivots(np.copy(self.ps)) @property def shape(self): return self.ps.shape @classmethod def from_quaternions(cls, qs, forward='z', plane='xz'): ds = np.zeros((qs.shape + (3,))) ds[(..., 'xyz'.index(forward))] = 1.0 return Pivots.from_directions((qs * ds), plane=plane) @classmethod def from_directions(cls, ds, plane='xz'): ys = ds[(..., 'xyz'.index(plane[0]))] xs = ds[(..., 'xyz'.index(plane[1]))] return Pivots(np.arctan2(ys, xs))
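A tiny demonstration of the angle wrapping that Pivots provides: arithmetic is performed on the circle, so results stay within (-pi, pi].

import numpy as np

a = Pivots(np.array([3.0]))
b = Pivots(np.array([1.5]))
print((a + b).ps)                                # ~[-1.78] rather than [4.5]
print(Pivots(np.array([7.0])).normalized().ps)   # ~[0.72]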
class DataAugmentationForVideoDistillation(object): def __init__(self, args, num_frames=None): self.input_mean = [0.485, 0.456, 0.406] self.input_std = [0.229, 0.224, 0.225] normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleTwoResizedCrop(args.input_size, args.teacher_input_size, [1, 0.875, 0.75, 0.66]) self.transform = transforms.Compose([Stack(roll=False), ToTorchFormatTensor(div=True), normalize]) window_size = (args.window_size if (num_frames is None) else ((num_frames // args.tubelet_size), args.window_size[1], args.window_size[2])) if (args.mask_type == 'tube'): self.masked_position_generator = TubeMaskingGenerator(window_size, args.mask_ratio) elif (args.mask_type == 'random'): self.masked_position_generator = RandomMaskingGenerator(window_size, args.mask_ratio) def __call__(self, images): (process_data_0, process_data_1, labels) = self.train_augmentation(images) (process_data_0, _) = self.transform((process_data_0, labels)) (process_data_1, _) = self.transform((process_data_1, labels)) return (process_data_0, process_data_1, self.masked_position_generator()) def __repr__(self): repr = '(DataAugmentationForVideoDistillation,\n' repr += (' transform = %s,\n' % str(self.transform)) repr += (' Masked position generator = %s,\n' % str(self.masked_position_generator)) repr += ')' return repr
def build_distillation_dataset(args, num_frames=None): if (num_frames is None): num_frames = args.num_frames transform = DataAugmentationForVideoDistillation(args, num_frames=num_frames) dataset = VideoDistillation(root=args.data_root, setting=args.data_path, video_ext='mp4', is_color=True, modality='rgb', new_length=num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, video_loader=True, use_decord=True, lazy_init=False, num_sample=args.num_sample, num_segments=args.num_sample) print(('Data Aug = %s' % str(transform))) return dataset
def build_dataset(is_train, test_mode, args): if (args.data_set == 'Kinetics-400'): mode = None anno_path = None if (is_train is True): mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif (test_mode is True): mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'val.csv') dataset = VideoClsDataset(anno_path=anno_path, data_path=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=(1 if (not test_mode) else 3), keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 400 elif (args.data_set == 'SSV2'): mode = None anno_path = None if (is_train is True): mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif (test_mode is True): mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'val.csv') dataset = SSVideoClsDataset(anno_path=anno_path, data_path=args.data_root, mode=mode, clip_len=1, num_segment=args.num_frames, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=(1 if (not test_mode) else 3), keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 174 elif (args.data_set == 'UCF101'): mode = None anno_path = None if (is_train is True): mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif (test_mode is True): mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'test.csv') dataset = VideoClsDataset(anno_path=anno_path, data_path=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=(1 if (not test_mode) else 3), keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 101 elif (args.data_set == 'HMDB51'): mode = None anno_path = None if (is_train is True): mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif (test_mode is True): mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'test.csv') dataset = VideoClsDataset(anno_path=anno_path, data_path=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=(1 if (not test_mode) else 3), keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 51 else: raise NotImplementedError() assert (nb_classes == args.nb_classes) print(('Number of the class = %d' % args.nb_classes)) return (dataset, nb_classes)
def train_class_batch(model, samples, target, criterion):
    outputs = model(samples)
    loss = criterion(outputs, target)
    return loss, outputs


def get_loss_scale_for_deepspeed(model):
    optimizer = model.optimizer
    return optimizer.loss_scale if hasattr(optimizer, 'loss_scale') else optimizer.cur_scale
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    model_ema: Optional[ModelEma] = None, mixup_fn=None, log_writer=None,
                    start_steps=None, lr_schedule_values=None, wd_schedule_values=None,
                    num_training_steps_per_epoch=None, update_freq=None):
    model.train(True)
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    if loss_scaler is None:
        # DeepSpeed path: the engine manages gradient zeroing and micro-steps itself.
        model.zero_grad()
        model.micro_steps = 0
    else:
        optimizer.zero_grad()

    for data_iter_step, (samples, targets, _, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        step = data_iter_step // update_freq
        if step >= num_training_steps_per_epoch:
            continue
        it = start_steps + step  # global optimizer-update index
        # assign the scheduled learning rate / weight decay for this iteration
        if lr_schedule_values is not None or (wd_schedule_values is not None and data_iter_step % update_freq == 0):
            for i, param_group in enumerate(optimizer.param_groups):
                if lr_schedule_values is not None:
                    param_group['lr'] = lr_schedule_values[it] * param_group['lr_scale']
                if wd_schedule_values is not None and param_group['weight_decay'] > 0:
                    param_group['weight_decay'] = wd_schedule_values[it]

        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)

        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)

        if loss_scaler is None:
            samples = samples.half()
            loss, output = train_class_batch(model, samples, targets, criterion)
        else:
            with torch.cuda.amp.autocast():
                loss, output = train_class_batch(model, samples, targets, criterion)

        loss_value = loss.item()

        if not math.isfinite(loss_value):
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)

        if loss_scaler is None:
            # DeepSpeed handles loss scaling, backward and the optimizer step
            loss /= update_freq
            model.backward(loss)
            model.step()
            if (data_iter_step + 1) % update_freq == 0:
                if model_ema is not None:
                    model_ema.update(model)
            grad_norm = None
            loss_scale_value = get_loss_scale_for_deepspeed(model)
        else:
            # native AMP path with gradient accumulation over update_freq iterations
            is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
            loss /= update_freq
            grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
                                    parameters=model.parameters(), create_graph=is_second_order,
                                    update_grad=(data_iter_step + 1) % update_freq == 0)
            if (data_iter_step + 1) % update_freq == 0:
                optimizer.zero_grad()
                if model_ema is not None:
                    model_ema.update(model)
            loss_scale_value = loss_scaler.state_dict()['scale']

        torch.cuda.synchronize()

        if mixup_fn is None:
            class_acc = (output.max(-1)[-1] == targets).float().mean()
        else:
            class_acc = None
        metric_logger.update(loss=loss_value)
        metric_logger.update(class_acc=class_acc)
        metric_logger.update(loss_scale=loss_scale_value)
        min_lr = 10.0
        max_lr = 0.0
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group['lr'])
            max_lr = max(max_lr, group['lr'])
        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if group['weight_decay'] > 0:
                weight_decay_value = group['weight_decay']
        metric_logger.update(weight_decay=weight_decay_value)
        metric_logger.update(grad_norm=grad_norm)

        if log_writer is not None:
            log_writer.update(loss=loss_value, head='loss')
            log_writer.update(class_acc=class_acc, head='loss')
            log_writer.update(loss_scale=loss_scale_value, head='opt')
            log_writer.update(lr=max_lr, head='opt')
            log_writer.update(min_lr=min_lr, head='opt')
            log_writer.update(weight_decay=weight_decay_value, head='opt')
            log_writer.update(grad_norm=grad_norm, head='opt')
            log_writer.set_step()

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
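# The per-iteration arrays indexed above (lr_schedule_values / wd_schedule_values) are built
# outside this snippet. A minimal sketch of how such a schedule could be produced -- linear
# warmup followed by half-cosine decay, one value per iteration. The helper name and the example
# hyperparameters are assumptions for illustration, not part of the original code.
import numpy as np

def cosine_schedule_sketch(base_value, final_value, epochs, steps_per_epoch, warmup_epochs=0):
    warmup_steps = warmup_epochs * steps_per_epoch
    warmup = np.linspace(0, base_value, warmup_steps)          # linear warmup from 0 to base_value
    steps = np.arange(epochs * steps_per_epoch - warmup_steps)
    cosine = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * steps / len(steps)))
    schedule = np.concatenate((warmup, cosine))
    assert len(schedule) == epochs * steps_per_epoch
    return schedule

# e.g. lr_schedule_values = cosine_schedule_sketch(1e-3, 1e-6, epochs=100, steps_per_epoch=500, warmup_epochs=5)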
@torch.no_grad()
def validation_one_epoch(data_loader, model, device):
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Val:'
    model.eval()
    for batch in metric_logger.log_every(data_loader, 10, header):
        videos = batch[0]
        target = batch[1]
        videos = videos.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            output = model(videos)
            loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = videos.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def final_test(data_loader, model, device, file):
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()
    final_result = []
    for batch in metric_logger.log_every(data_loader, 10, header):
        videos = batch[0]
        target = batch[1]
        ids = batch[2]
        chunk_nb = batch[3]
        split_nb = batch[4]
        videos = videos.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            output = model(videos)
            loss = criterion(output, target)
        for i in range(output.size(0)):
            string = '{} {} {} {} {}\n'.format(ids[i].replace('[', '').replace(']', ''),
                                               str(output.data[i].cpu().numpy().tolist()),
                                               str(int(target[i].cpu().numpy())),
                                               str(int(chunk_nb[i].cpu().numpy())),
                                               str(int(split_nb[i].cpu().numpy())))
            final_result.append(string)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = videos.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    if not os.path.exists(file):
        os.mknod(file)
    with open(file, 'w') as f:
        f.write('{}, {}\n'.format(acc1, acc5))
        for line in final_result:
            f.write(line)
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def merge(eval_path, num_tasks):
    dict_feats = {}
    dict_label = {}
    dict_pos = {}
    print('Reading individual output files')
    for x in range(num_tasks):
        file = os.path.join(eval_path, str(x) + '.txt')
        lines = open(file, 'r').readlines()[1:]
        for line in lines:
            line = line.strip()
            name = line.split('[')[0]
            label = line.split(']')[1].split(' ')[1]
            chunk_nb = line.split(']')[1].split(' ')[2]
            split_nb = line.split(']')[1].split(' ')[3]
            # np.float was removed in NumPy >= 1.24; use the builtin float instead
            data = np.fromstring(line.split('[')[1].split(']')[0], dtype=float, sep=',')
            data = softmax(data)
            if not (name in dict_feats):
                dict_feats[name] = []
                dict_label[name] = 0
                dict_pos[name] = []
            if (chunk_nb + split_nb) in dict_pos[name]:
                continue
            dict_feats[name].append(data)
            dict_pos[name].append(chunk_nb + split_nb)
            dict_label[name] = label
    print('Computing final results')
    input_lst = []
    print(len(dict_feats))
    for i, item in enumerate(dict_feats):
        input_lst.append([i, item, dict_feats[item], dict_label[item]])
    from multiprocessing import Pool
    p = Pool(64)
    ans = p.map(compute_video, input_lst)
    top1 = [x[1] for x in ans]
    top5 = [x[2] for x in ans]
    pred = [x[0] for x in ans]
    label = [x[3] for x in ans]
    final_top1, final_top5 = np.mean(top1), np.mean(top5)
    return final_top1 * 100, final_top5 * 100
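# merge() applies softmax() to each parsed row of logits, but that helper is not defined in this
# snippet. A minimal, numerically stable stand-in (an assumption, not the original implementation):
import numpy as np

def softmax(x):
    x = np.asarray(x, dtype=np.float64)
    x = x - x.max()   # shift by the max for numerical stability
    e = np.exp(x)
    return e / e.sum()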
def compute_video(lst):
    i, video_id, data, label = lst
    feat = [x for x in data]
    feat = np.mean(feat, axis=0)
    pred = np.argmax(feat)
    top1 = (int(pred) == int(label)) * 1.0
    top5 = (int(label) in np.argsort(-feat)[:5]) * 1.0
    return [pred, top1, top5, int(label)]
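# Hypothetical usage of compute_video on toy data: three 5-class score vectors for one video with
# ground-truth label 2 (the scores below are made up for illustration). The per-view scores are
# averaged, then top-1/top-5 correctness is computed.
if __name__ == '__main__':
    toy_scores = [np.array([0.1, 0.2, 0.4, 0.2, 0.1]),
                  np.array([0.0, 0.1, 0.6, 0.2, 0.1]),
                  np.array([0.2, 0.1, 0.5, 0.1, 0.1])]
    pred, top1, top5, label = compute_video([0, 'video_0000', toy_scores, 2])
    print(pred, top1, top5, label)   # expected: 2 1.0 1.0 2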
class TubeMaskingGenerator():
    def __init__(self, input_size, mask_ratio):
        self.frames, self.height, self.width = input_size
        self.num_patches_per_frame = self.height * self.width
        self.total_patches = self.frames * self.num_patches_per_frame
        self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)
        self.total_masks = self.frames * self.num_masks_per_frame

    def __repr__(self):
        repr_str = 'Masks: total patches {}, mask patches {}'.format(self.total_patches, self.total_masks)
        return repr_str

    def __call__(self):
        # the same spatial mask is repeated across all temporal slices ("tube" masking)
        mask_per_frame = np.hstack([
            np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),
            np.ones(self.num_masks_per_frame),
        ])
        np.random.shuffle(mask_per_frame)
        mask = np.tile(mask_per_frame, (self.frames, 1))
        mask = mask.flatten()
        return mask
class RandomMaskingGenerator():
    def __init__(self, input_size, mask_ratio):
        self.frames, self.height, self.width = input_size
        self.total_patches = self.frames * self.height * self.width
        self.num_masks = int(mask_ratio * self.total_patches)
        self.total_masks = self.num_masks

    def __repr__(self):
        repr_str = 'Masks: total patches {}, mask patches {}'.format(self.total_patches, self.total_masks)
        return repr_str

    def __call__(self):
        # masked positions are sampled independently over the whole space-time volume
        mask = np.hstack([
            np.zeros(self.total_patches - self.num_masks),
            np.ones(self.num_masks),
        ])
        np.random.shuffle(mask)
        return mask
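# Hypothetical usage of the two generators above. A 16-frame clip with tubelet size 2 and a
# 14x14 patch grid gives an 8x14x14 token grid; with mask_ratio 0.9 the tube generator masks the
# same 176 spatial positions in every temporal slice, while the random generator samples the
# masked positions independently over all 1568 tokens.
if __name__ == '__main__':
    tube = TubeMaskingGenerator((8, 14, 14), mask_ratio=0.9)
    rand = RandomMaskingGenerator((8, 14, 14), mask_ratio=0.9)
    print(tube)                            # Masks: total patches 1568, mask patches 1408
    print(tube().shape, tube().sum())      # (1568,) 1408.0
    print(rand().shape, rand().sum())      # (1568,) 1411.0  (int(0.9 * 1568) masked positions)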
def trunc_normal_(tensor, mean=0.0, std=1.0): __call_trunc_normal_(tensor, mean=mean, std=std, a=(- std), b=std)
class PretrainVisionTransformerEncoder(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop_rate=0.0,
                 attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=nn.LayerNorm, init_values=None,
                 tubelet_size=2):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans,
                                      embed_dim=embed_dim, tubelet_size=tubelet_size)
        num_patches = self.patch_embed.num_patches

        self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                  qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                  norm_layer=norm_layer, init_values=init_values)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        return len(self.blocks)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = x + self.pos_embed.type_as(x).to(x.device).clone().detach()
        for i, blk in enumerate(self.blocks):
            x = blk(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        return x
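# get_sinusoid_encoding_table() is referenced above but not defined in this snippet. A minimal
# sketch of the usual fixed sine-cosine table (shape 1 x n_position x d_hid), offered as an
# assumption about the missing helper rather than its confirmed implementation:
import numpy as np
import torch

def get_sinusoid_encoding_table(n_position, d_hid):
    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
    table = np.array([get_position_angle_vec(pos) for pos in range(n_position)])
    table[:, 0::2] = np.sin(table[:, 0::2])   # even dimensions
    table[:, 1::2] = np.cos(table[:, 1::2])   # odd dimensions
    return torch.tensor(table, dtype=torch.float, requires_grad=False).unsqueeze(0)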
class PretrainVideoTransformerTeacher(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """
    def __init__(self, img_size=224, patch_size=16, encoder_in_chans=3, encoder_num_classes=0,
                 encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12, mlp_ratio=4.0,
                 qkv_bias=False, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0,
                 norm_layer=nn.LayerNorm, init_values=0.0, tubelet_size=2):
        super().__init__()
        self.encoder = PretrainVisionTransformerEncoder(
            img_size=img_size, patch_size=patch_size, in_chans=encoder_in_chans,
            num_classes=encoder_num_classes, embed_dim=encoder_embed_dim, depth=encoder_depth,
            num_heads=encoder_num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate,
            norm_layer=norm_layer, init_values=init_values, tubelet_size=tubelet_size)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        return len(self.blocks)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def forward(self, x):
        x = self.encoder(x)
        return x
@register_model
def pretrain_videomae_teacher_base_patch16_224(pretrained=False, **kwargs):
    model = PretrainVideoTransformerTeacher(
        patch_size=16,
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_num_classes=0,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-06),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.load(kwargs['init_ckpt'], map_location='cpu')
        model.load_state_dict(checkpoint['model'])
    return model
@register_model
def pretrain_videomae_teacher_large_patch16_224(pretrained=False, **kwargs):
    model = PretrainVideoTransformerTeacher(
        patch_size=16,
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_num_classes=0,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-06),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.load(kwargs['init_ckpt'], map_location='cpu')
        model.load_state_dict(checkpoint['model'])
    return model
@register_model
def pretrain_videomae_teacher_huge_patch16_224(pretrained=False, **kwargs):
    model = PretrainVideoTransformerTeacher(
        patch_size=16,
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_num_classes=0,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-06),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.load(kwargs['init_ckpt'], map_location='cpu')
        model.load_state_dict(checkpoint['model'])
    return model
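# Hypothetical usage of the registered teacher factories through timm's registry, assuming the
# factories above are importable in the timm model namespace. The keyword values are placeholders;
# with pretrained=True the factory expects kwargs['init_ckpt'] to point at a compatible checkpoint.
from timm.models import create_model

teacher = create_model(
    'pretrain_videomae_teacher_base_patch16_224',
    pretrained=False,            # set True together with init_ckpt='...' to load weights
    drop_path_rate=0.0,
    tubelet_size=2,
)
# input is (batch, channels, frames, height, width); the encoder returns one token per
# space-time patch, e.g. torch.randn(1, 3, 16, 224, 224) -> roughly (1, 1568, 768) for the base model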
class LARS(torch.optim.Optimizer):
    """
    LARS optimizer, no rate scaling or weight decay for parameters <= 1D.
    """
    def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
                        trust_coefficient=trust_coefficient)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self):
        for g in self.param_groups:
            for p in g['params']:
                dp = p.grad
                if dp is None:
                    continue
                if p.ndim > 1:
                    dp = dp.add(p, alpha=g['weight_decay'])
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    q = torch.where(param_norm > 0.0,
                                    torch.where(update_norm > 0,
                                                (g['trust_coefficient'] * param_norm) / update_norm,
                                                one),
                                    one)
                    dp = dp.mul(q)
                param_state = self.state[p]
                if 'mu' not in param_state:
                    param_state['mu'] = torch.zeros_like(p)
                mu = param_state['mu']
                mu.mul_(g['momentum']).add_(dp)
                p.add_(mu, alpha=-g['lr'])
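# Hypothetical usage of the LARS optimizer above on a toy model. Parameters with ndim <= 1
# (biases, norm weights) skip both weight decay and the trust-ratio scaling, as the docstring says.
import torch

if __name__ == '__main__':
    toy_model = torch.nn.Sequential(torch.nn.Linear(128, 64), torch.nn.ReLU(), torch.nn.Linear(64, 10))
    opt_lars = LARS(toy_model.parameters(), lr=0.1, weight_decay=1e-4, momentum=0.9)
    x, y = torch.randn(32, 128), torch.randint(0, 10, (32,))
    torch.nn.functional.cross_entropy(toy_model(x), y).backward()
    opt_lars.step()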
def get_num_layer_for_vit(var_name, num_max_layer):
    if var_name in ('cls_token', 'mask_token', 'pos_embed'):
        return 0
    elif var_name.startswith('patch_embed'):
        return 0
    elif var_name.startswith('rel_pos_bias'):
        return num_max_layer - 1
    elif var_name.startswith('blocks'):
        layer_id = int(var_name.split('.')[1])
        return layer_id + 1
    else:
        return num_max_layer - 1
class LayerDecayValueAssigner(object):
    def __init__(self, values):
        self.values = values

    def get_scale(self, layer_id):
        return self.values[layer_id]

    def get_layer_id(self, var_name):
        return get_num_layer_for_vit(var_name, len(self.values))
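# Hypothetical wiring of layer-wise lr decay for a 12-block ViT: one scale per layer bucket,
# 0.75 ** (num_layers + 1 - i), so the embeddings get the smallest lr multiplier and the last
# bucket gets 1.0. The decay value 0.75 is an assumed example, not a prescribed setting.
num_layers = 12
layer_decay = 0.75
assigner = LayerDecayValueAssigner(
    [layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)])
# assigner.get_layer_id('patch_embed.proj.weight')   -> 0
# assigner.get_layer_id('blocks.5.attn.qkv.weight')  -> 6
# assigner.get_scale(0) -> 0.75**13 (smallest), assigner.get_scale(13) -> 1.0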
def get_parameter_groups(model, weight_decay=1e-05, skip_list=(), get_num_layer=None, get_layer_scale=None):
    parameter_group_names = {}
    parameter_group_vars = {}
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if len(param.shape) == 1 or name.endswith('.bias') or name in skip_list:
            group_name = 'no_decay'
            this_weight_decay = 0.0
        else:
            group_name = 'decay'
            this_weight_decay = weight_decay
        if get_num_layer is not None:
            layer_id = get_num_layer(name)
            group_name = 'layer_%d_%s' % (layer_id, group_name)
        else:
            layer_id = None

        if group_name not in parameter_group_names:
            if get_layer_scale is not None:
                scale = get_layer_scale(layer_id)
            else:
                scale = 1.0
            parameter_group_names[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'lr_scale': scale}
            parameter_group_vars[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'lr_scale': scale}

        parameter_group_vars[group_name]['params'].append(param)
        parameter_group_names[group_name]['params'].append(name)
    print('Param groups = %s' % json.dumps(parameter_group_names, indent=2))
    return list(parameter_group_vars.values())
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
    opt_lower = args.opt.lower()
    weight_decay = args.weight_decay
    if filter_bias_and_bn:
        skip = {}
        if skip_list is not None:
            skip = skip_list
        elif hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
        weight_decay = 0.0
    else:
        parameters = model.parameters()

    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'

    opt_args = dict(lr=args.lr, weight_decay=weight_decay)
    if hasattr(args, 'opt_eps') and args.opt_eps is not None:
        opt_args['eps'] = args.opt_eps
    if hasattr(args, 'opt_betas') and args.opt_betas is not None:
        opt_args['betas'] = args.opt_betas

    print('optimizer settings:', opt_args)

    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'nadam':
        optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        if not args.lr:
            opt_args['lr'] = None
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'novograd':
        optimizer = NovoGrad(parameters, **opt_args)
    elif opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    elif opt_lower == 'lars':
        opt_args.pop('eps', None)
        optimizer = LARS(parameters, **opt_args)
    else:
        # the original `assert False and 'Invalid optimizer'` always raised a bare AssertionError
        # and dropped the message; raise an explicit error with the offending name instead
        raise ValueError('Invalid optimizer: %s' % opt_lower)

    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)

    return optimizer
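# Hypothetical call to create_optimizer. It only reads attribute-style arguments, so a
# SimpleNamespace is enough for a quick sketch; all field values below are assumed examples.
from types import SimpleNamespace
import torch.nn as nn

if __name__ == '__main__':
    args_sketch = SimpleNamespace(opt='adamw', lr=1e-3, weight_decay=0.05,
                                  opt_eps=1e-8, opt_betas=(0.9, 0.999), momentum=0.9)
    tiny_model = nn.Linear(16, 4)
    opt = create_optimizer(args_sketch, tiny_model)
    # the bias lands in the 'no_decay' group, the weight in 'decay'; AdamW is built with lr=1e-3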
def _interpolation(kwargs):
    interpolation = kwargs.pop('resample', Image.BILINEAR)
    if isinstance(interpolation, (list, tuple)):
        return random.choice(interpolation)
    else:
        return interpolation
def _check_args_tf(kwargs):
    if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
        kwargs.pop('fillcolor')
    kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)
def shear_y(img, factor, **kwargs):
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)
def translate_x_rel(img, pct, **kwargs):
    pixels = pct * img.size[0]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_rel(img, pct, **kwargs):
    pixels = pct * img.size[1]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def translate_x_abs(img, pixels, **kwargs):
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_abs(img, pixels, **kwargs):
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def rotate(img, degrees, **kwargs):
    _check_args_tf(kwargs)
    if _PIL_VER >= (5, 2):
        return img.rotate(degrees, **kwargs)
    elif _PIL_VER >= (5, 0):
        w, h = img.size
        post_trans = (0, 0)
        rotn_center = (w / 2.0, h / 2.0)
        angle = -math.radians(degrees)
        matrix = [
            round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
            round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0,
        ]

        def transform(x, y, matrix):
            (a, b, c, d, e, f) = matrix
            return a * x + b * y + c, d * x + e * y + f

        matrix[2], matrix[5] = transform(-rotn_center[0] - post_trans[0],
                                         -rotn_center[1] - post_trans[1], matrix)
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        return img.rotate(degrees, resample=kwargs['resample'])
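# Hypothetical usage of the PIL affine helpers above on a synthetic image. _PIL_VER is normally a
# module-level Pillow version tuple defined elsewhere in the rand-augment code; a typical
# definition is included here only so the sketch is self-contained (an assumption, not the original).
import PIL
from PIL import Image

_PIL_VER = tuple(int(x) for x in PIL.__version__.split('.')[:2])

if __name__ == '__main__':
    demo = Image.new('RGB', (224, 224), color=(128, 128, 128))
    sheared = shear_x(demo, 0.3, resample=Image.BILINEAR, fillcolor=(0, 0, 0))
    shifted = translate_y_rel(demo, 0.1, resample=Image.BILINEAR)
    rotated = rotate(demo, 30, resample=Image.BILINEAR)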
def auto_contrast(img, **__): return ImageOps.autocontrast(img)
def invert(img, **__): return ImageOps.invert(img)
def equalize(img, **__): return ImageOps.equalize(img)
def solarize(img, thresh, **__): return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
    lut = []
    for i in range(256):
        if i < thresh:
            lut.append(min(255, i + add))
        else:
            lut.append(i)
    if img.mode in ('L', 'RGB'):
        if img.mode == 'RGB' and len(lut) == 256:
            lut = lut + lut + lut
        return img.point(lut)
    else:
        return img
def posterize(img, bits_to_keep, **__):
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, bits_to_keep)