def _split_channels(num_chan, num_groups):
    split = [num_chan // num_groups for _ in range(num_groups)]
    split[0] += num_chan - sum(split)
    return split
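# Quick behavior sketch for the helper above: the first group absorbs any remainder.
assert _split_channels(32, 3) == [12, 10, 10]
assert sum(_split_channels(40, 3)) == 40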
class MixedConv(ModuleList):
    """ Mixed Grouped Convolution

    Based on MDConv and GroupedConv in MixNet impl:
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py
    """

    def __init__(self, in_channels, out_channels, kernel_size=3,
                 stride=1, padding='', dilation=1, depthwise=False, conv_layer=None, **kwargs):
        super(MixedConv, self).__init__()
        conv_layer = Conv2d if conv_layer is None else conv_layer
        kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
        num_groups = len(kernel_size)
        in_splits = _split_channels(in_channels, num_groups)
        out_splits = _split_channels(out_channels, num_groups)
        self.in_channels = sum(in_splits)
        self.out_channels = sum(out_splits)
        for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)):
            conv_groups = out_ch if depthwise else 1
            self.append(conv_layer(
                in_ch, out_ch, k, stride=stride, padding=padding,
                dilation=dilation, groups=conv_groups, **kwargs))
        self.splits = jnp.array(in_splits).cumsum()[:-1]

    def __call__(self, x):
        x_split = jnp.split(x, self.splits, 1)
        x_out = [c(x_split[i]) for i, c in enumerate(self)]
        x = jnp.concatenate(x_out, axis=1)
        return x
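# Minimal usage sketch (hypothetical; assumes the Conv2d layer used elsewhere in
# this codebase and NCHW inputs): 32 channels split across three kernel sizes.
# mc = MixedConv(32, 32, kernel_size=[3, 5, 7], depthwise=True)
# y = mc(jnp.zeros((1, 32, 16, 16)))   # y.shape == (1, 32, 16, 16)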
class _BatchNorm(Module):
    """Applies a batch normalization on different ranks of an input tensor.

    The module follows the operation described in Algorithm 1 of
    `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
    <https://arxiv.org/abs/1502.03167>`_.
    """

    def __init__(self, num_features: int, redux: Iterable[int], momentum: float = 0.9, eps: float = 1e-05):
        """Creates a BatchNorm module instance.

        Args:
            num_features: number of features in the input example.
            redux: list of indices of reduction axes. Batch norm statistics are computed by averaging over these axes.
            momentum: value used to compute exponential moving average of batch statistics.
            eps: small value which is used for numerical stability.
        """
        super().__init__()
        self.num_features = num_features
        self.momentum = momentum
        self.eps = eps
        self.redux = tuple(redux)
        self.weight = TrainVar(jnp.ones(num_features))
        self.bias = TrainVar(jnp.zeros(num_features))
        self.running_mean = StateVar(jnp.zeros(num_features))
        self.running_var = StateVar(jnp.ones(num_features))

    def __call__(self, x: JaxArray, training: bool) -> JaxArray:
        """Performs batch normalization of input tensor.

        Args:
            x: input tensor.
            training: if True compute batch normalization in training mode (accumulating batch statistics),
                otherwise compute in evaluation mode (using already accumulated batch statistics).

        Returns:
            Batch normalized tensor.
        """
        # Build the broadcast shape from the reduction axes so both 3D (N, C, L)
        # and 4D (N, C, H, W) inputs work (the original hardcoded (1, -1, 1, 1)
        # only handled the 4D case).
        shape = tuple(1 if i in self.redux else -1 for i in range(max(self.redux) + 1))
        weight = self.weight.value.reshape(shape)
        bias = self.bias.value.reshape(shape)
        if training:
            mean = x.mean(self.redux, keepdims=True)
            var = (x ** 2).mean(self.redux, keepdims=True) - mean ** 2
            self.running_mean.value += (1 - self.momentum) * (
                mean.squeeze(axis=self.redux) - self.running_mean.value)
            self.running_var.value += (1 - self.momentum) * (
                var.squeeze(axis=self.redux) - self.running_var.value)
        else:
            mean, var = self.running_mean.value.reshape(shape), self.running_var.value.reshape(shape)
        y = weight * (x - mean) * functional.rsqrt(var + self.eps) + bias
        return y
class BatchNorm1d(_BatchNorm):
    """Applies a 1D batch normalization on a 3D-input batch of shape (N,C,L).

    The module follows the operation described in Algorithm 1 of
    `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
    <https://arxiv.org/abs/1502.03167>`_.
    """

    def __init__(self, num_features: int, momentum: float = 0.9, eps: float = 1e-05):
        """Creates a BatchNorm1D module instance.

        Args:
            num_features: number of features in the input example.
            momentum: value used to compute exponential moving average of batch statistics.
            eps: small value which is used for numerical stability.
        """
        super().__init__(num_features, (0, 2), momentum, eps)
class BatchNorm2d(_BatchNorm):
    """Applies a 2D batch normalization on a 4D-input batch of shape (N,C,H,W).

    The module follows the operation described in Algorithm 1 of
    `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
    <https://arxiv.org/abs/1502.03167>`_.
    """

    def __init__(self, num_features: int, momentum: float = 0.9, eps: float = 1e-05):
        """Creates a BatchNorm2D module instance.

        Args:
            num_features: number of features in the input example.
            momentum: value used to compute exponential moving average of batch statistics.
            eps: small value which is used for numerical stability.
        """
        super().__init__(num_features, (0, 2, 3), momentum, eps)
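# Usage sketch (hypothetical; relies on the Module/TrainVar machinery above):
# bn = BatchNorm2d(64)
# y_train = bn(x, training=True)    # normalizes with batch stats, updates running_mean/var
# y_eval = bn(x, training=False)    # normalizes with the accumulated running statistics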
def to_tuple(v: Union[Tuple[Number, ...], Number, Iterable], n: int):
    """Converts input to an n-tuple: numbers are repeated n times, iterables pass through."""
    if isinstance(v, tuple):
        return v
    elif isinstance(v, Number):
        return (v,) * n
    else:
        return tuple(v)
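# Behavior sketch:
assert to_tuple(3, 2) == (3, 3)        # scalars are repeated n times
assert to_tuple((1, 2), 2) == (1, 2)   # tuples pass through unchanged
assert to_tuple([1, 2], 2) == (1, 2)   # other iterables are converted to tuples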
def validate(args):
    rng = jax.random.PRNGKey(0)
    model, variables = create_model(args.model, pretrained=True, rng=rng)
    print(f'Created {args.model} model. Validating...')

    if args.no_jit:
        eval_step = lambda images, labels: eval_forward(model, variables, images, labels)
    else:
        eval_step = jax.jit(lambda images, labels: eval_forward(model, variables, images, labels))

    dataset = create_dataset('imagenet', args.data)
    data_config = resolve_data_config(vars(args), model=model)
    loader = create_loader(
        dataset,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        use_prefetcher=False,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=8,
        crop_pct=data_config['crop_pct'])

    batch_time = AverageMeter()
    correct_top1, correct_top5 = 0, 0
    total_examples = 0
    start_time = prev_time = time.time()
    for batch_index, (images, labels) in enumerate(loader):
        images = images.numpy().transpose(0, 2, 3, 1)  # NCHW -> NHWC for the Flax model
        labels = labels.numpy()
        top1_count, top5_count = eval_step(images, labels)
        correct_top1 += int(top1_count)
        correct_top5 += int(top5_count)
        total_examples += images.shape[0]
        batch_time.update(time.time() - prev_time)
        if batch_index % 20 == 0 and batch_index > 0:
            print(
                f'Test: [{batch_index:>4d}/{len(loader)}] '
                f'Rate: {images.shape[0] / batch_time.val:>5.2f}/s ({images.shape[0] / batch_time.avg:>5.2f}/s) '
                f'Acc@1: {100 * correct_top1 / total_examples:>7.3f} '
                f'Acc@5: {100 * correct_top5 / total_examples:>7.3f}')
        prev_time = time.time()

    acc_1 = 100 * correct_top1 / total_examples
    acc_5 = 100 * correct_top5 / total_examples
    print(
        f'Validation complete. {total_examples / (prev_time - start_time):>5.2f} img/s. '
        f'Acc@1 {acc_1:>7.3f}, Acc@5 {acc_5:>7.3f}')
    return dict(top1=float(acc_1), top5=float(acc_5))
def eval_forward(model, variables, images, labels):
    logits = model.apply(variables, images, mutable=False, training=False)
    top1_count, top5_count = correct_topk(logits, labels, topk=(1, 5))
    return top1_count, top5_count
def main():
    args = parser.parse_args()
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    jax.config.enable_omnistaging()

    def _try_validate(args):
        res = None
        batch_size = args.batch_size
        while res is None:
            try:
                print(f'Setting validation batch size to {batch_size}')
                args.batch_size = batch_size
                res = validate(args)
            except RuntimeError as e:
                if batch_size <= 1:
                    print('Validation failed with no ability to reduce batch size. Exiting.')
                    raise e
                batch_size = max(batch_size // 2, 1)
                print('Validation failed, reducing batch size by 50%')
        return res

    if get_model_cfg(args.model) is not None:
        _try_validate(args)
    else:
        models = list_models(pretrained=True)
        if args.model != 'all':
            models = fnmatch.filter(models, args.model)
        if not models:
            print(f'ERROR: No models found to validate with pattern {args.model}.')
            exit(1)
        print('Validating:', ', '.join(models))
        results = []
        start_batch_size = args.batch_size
        for m in models:
            args.batch_size = start_batch_size  # reset the batch size for each model
            args.model = m
            res = _try_validate(args)
            res.update(dict(model=m))
            results.append(res)
        print('Results:')
        for r in results:
            print(f"Model: {r['model']}, Top1: {r['top1']}, Top5: {r['top5']}")
def validate(args):
    model = create_model(args.model, pretrained=True)
    print(f'Created {args.model} model. Validating...')

    eval_step = objax.Jit(
        lambda images, labels: eval_forward(model, images, labels), model.vars())

    dataset = create_dataset('imagenet', args.data)
    data_config = resolve_data_config(vars(args), model=model)
    loader = create_loader(
        dataset,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        use_prefetcher=False,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=8,
        crop_pct=data_config['crop_pct'])

    batch_time = AverageMeter()
    correct_top1, correct_top5 = 0, 0
    total_examples = 0
    start_time = prev_time = time.time()
    for batch_index, (images, labels) in enumerate(loader):
        images = images.numpy()
        labels = labels.numpy()
        top1_count, top5_count = eval_step(images, labels)
        correct_top1 += int(top1_count)
        correct_top5 += int(top5_count)
        total_examples += images.shape[0]
        batch_time.update(time.time() - prev_time)
        if batch_index % 20 == 0 and batch_index > 0:
            print(
                f'Test: [{batch_index:>4d}/{len(loader)}] '
                f'Rate: {images.shape[0] / batch_time.val:>5.2f}/s ({images.shape[0] / batch_time.avg:>5.2f}/s) '
                f'Acc@1: {100 * correct_top1 / total_examples:>7.3f} '
                f'Acc@5: {100 * correct_top5 / total_examples:>7.3f}')
        prev_time = time.time()

    acc_1 = 100 * correct_top1 / total_examples
    acc_5 = 100 * correct_top5 / total_examples
    print(
        f'Validation complete. {total_examples / (prev_time - start_time):>5.2f} img/s. '
        f'Acc@1 {acc_1:>7.3f}, Acc@5 {acc_5:>7.3f}')
    return dict(top1=float(acc_1), top5=float(acc_5))
def eval_forward(model, images, labels):
    logits = model(images, training=False)
    top1_count, top5_count = correct_topk(logits, labels, topk=(1, 5))
    return top1_count, top5_count
def main():
    args = parser.parse_args()
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)

    def _try_validate(args):
        res = None
        batch_size = args.batch_size
        while res is None:
            try:
                print(f'Setting validation batch size to {batch_size}')
                args.batch_size = batch_size
                res = validate(args)
            except RuntimeError as e:
                if batch_size <= 1:
                    print('Validation failed with no ability to reduce batch size. Exiting.')
                    raise e
                batch_size = max(batch_size // 2, 1)
                print('Validation failed, reducing batch size by 50%')
        return res

    if get_model_cfg(args.model) is not None:
        _try_validate(args)
    else:
        models = list_models(pretrained=True)
        if args.model != 'all':
            models = fnmatch.filter(models, args.model)
        if not models:
            print(f'ERROR: No models found to validate with pattern {args.model}.')
            exit(1)
        print('Validating:', ', '.join(models))
        results = []
        start_batch_size = args.batch_size
        for m in models:
            args.batch_size = start_batch_size
            args.model = m
            res = _try_validate(args)
            res.update(dict(model=m))
            results.append(res)
        print('Results:')
        for r in results:
            print(f"Model: {r['model']}, Top1: {r['top1']}, Top5: {r['top5']}")
@flax.struct.dataclass
class TrainState:
    step: int
    variables: flax.core.FrozenDict[str, Any]
    dynamic_scale: flax.optim.DynamicScale
    opt_tx: optax.GradientTransformation = flax.struct.field(pytree_node=False)
    opt_state: optax.OptState
    ema: EmaState
def config_to_opt_args(config: ml_collections.ConfigDict):
    opt_kwargs = dict(
        eps=config.get('opt_eps'),
        decay=config.get('opt_decay'),
        momentum=config.get('opt_momentum'),
        beta1=config.get('opt_beta1'),
        beta2=config.get('opt_beta2'),
        weight_decay=config.get('opt_weight_decay', 0))
    opt_kwargs = {k: v for k, v in opt_kwargs.items() if v is not None}
    return opt_kwargs
def create_train_state(config: ml_collections.ConfigDict, variables, lr_fn):
    """Create initial training state."""
    params = variables['params']
    dynamic_scale = None
    platform = jax.local_devices()[0].platform
    if config.half_precision and platform == 'gpu':
        dynamic_scale = flax.optim.DynamicScale()

    opt_tx = create_optax_optim(config.opt, learning_rate=lr_fn, **config_to_opt_args(config))
    opt_state = opt_tx.init(params)
    state = TrainState(
        step=0,
        variables=variables,
        opt_tx=opt_tx,
        opt_state=opt_state,
        dynamic_scale=dynamic_scale,
        ema=EmaState.create(config.ema_decay, variables))
    return state
def restore_checkpoint(state, model_dir):
    return checkpoints.restore_checkpoint(model_dir, state)


def save_checkpoint(state, model_dir):
    if jax.host_id() == 0:
        # Un-replicate the state (take the copy from the first device) before saving.
        state = jax.device_get(jax.tree_map(lambda x: x[0], state))
        step = int(state.step)
        checkpoints.save_checkpoint(model_dir, state, step, keep=3)
def compute_metrics(logits, labels, label_smoothing=0.0):
    loss = cross_entropy_loss(logits, labels, label_smoothing=label_smoothing)
    top1, top5 = acc_topk(logits, labels, (1, 5))
    metrics = {'loss': loss, 'top1': top1, 'top5': top5}
    metrics = lax.pmean(metrics, axis_name='batch')
    return metrics
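# Note: the lax.pmean above averages metrics across replicas and therefore needs
# an enclosing jax.pmap with axis_name='batch' (as used for p_train_step and
# p_eval_step below); calling compute_metrics outside a pmapped function would
# fail on the unbound axis name.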
def train_step(apply_fn, state: TrainState, batch, lr_fn,
               label_smoothing=0.1, weight_decay=0.0001, dropout_rng=None):
    """Perform a single training step."""

    def loss_fn(_params):
        """loss function used for training."""
        _logits, _new_model_state = apply_fn(
            state.variables.copy({'params': _params}),
            batch['image'],
            training=True,
            mutable=['batch_stats'],
            rngs={'dropout': dropout_rng})
        loss = cross_entropy_loss(_logits, batch['label'], label_smoothing=label_smoothing)
        weight_penalty_params = jax.tree_leaves(_params)
        # L2 penalty on kernels only (ndim > 1 excludes biases and norm params).
        weight_penalty = 0.5 * weight_decay * sum(
            [jnp.sum(x ** 2) for x in weight_penalty_params if x.ndim > 1])
        loss = loss + weight_penalty
        return loss, (_new_model_state, _logits)

    step = state.step
    variables, params = state.variables.pop('params')
    dynamic_scale = state.dynamic_scale

    if dynamic_scale:
        grad_fn = state.dynamic_scale.value_and_grad(loss_fn, has_aux=True, axis_name='batch')
        # dynamic_scale averages the gradient across replicas internally.
        dynamic_scale, is_fin, aux, grad = grad_fn(params)
    else:
        grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
        aux, grad = grad_fn(params)
        grad = lax.pmean(grad, axis_name='batch')

    new_model_state, logits = aux[1]
    updates, new_opt_state = state.opt_tx.update(grad, state.opt_state, params)
    new_params = optax.apply_updates(params, updates)

    metrics = compute_metrics(logits, batch['label'], label_smoothing=label_smoothing)
    metrics['learning_rate'] = lr_fn(step)

    if dynamic_scale:
        # If any gradient was non-finite, skip the update by restoring the old state.
        new_opt_state = jax.tree_multimap(
            functools.partial(jnp.where, is_fin), new_opt_state, state.opt_state)
        new_params = jax.tree_multimap(
            functools.partial(jnp.where, is_fin), new_params, params)
        metrics['scale'] = dynamic_scale.scale

    new_variables = state.variables.copy({'params': new_params, **new_model_state})
    new_ema = state.ema.update(new_variables) if state.ema is not None else None
    new_state = state.replace(
        step=step + 1,
        variables=new_variables,
        opt_state=new_opt_state,
        dynamic_scale=dynamic_scale,
        ema=new_ema)
    return new_state, metrics
def eval_step(apply_fn, state, batch):
    logits = apply_fn(state.variables, batch['image'], training=False, mutable=False)
    return compute_metrics(logits, batch['label'])


def eval_step_ema(apply_fn, state, batch):
    logits = apply_fn(state.ema.variables, batch['image'], training=False, mutable=False)
    return compute_metrics(logits, batch['label'])
def prepare_tf_data(xs):
    """Convert an input batch from tf Tensors to numpy arrays."""
    local_device_count = jax.local_device_count()

    def _prepare(x):
        # Use _numpy() for zero-copy conversion from a tf.Tensor.
        x = x._numpy()
        # Reshape (host_batch, ...) -> (local_devices, device_batch, ...) for pmap.
        return x.reshape((local_device_count, -1) + x.shape[1:])

    return jax.tree_map(_prepare, xs)
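# Shape sketch: with 8 local devices and a host batch of 64 RGB 224px images, an
# array of shape (64, 224, 224, 3) becomes (8, 8, 224, 224, 3), i.e. one leading
# shard per device as expected by jax.pmap.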
def create_input_iter(dataset_builder, batch_size, train, image_size,
                      augment_name=None, randaug_num_layers=None, randaug_magnitude=None,
                      half_precision=False, cache=False):
    ds = input_pipeline.create_split(
        dataset_builder, batch_size, train=train, image_size=image_size,
        augment_name=augment_name, randaug_num_layers=randaug_num_layers,
        randaug_magnitude=randaug_magnitude, half_precision=half_precision, cache=cache)
    it = map(prepare_tf_data, ds)
    it = flax.jax_utils.prefetch_to_device(it, 2)
    return it
def sync_batch_stats(state):
    """Sync the batch statistics across replicas."""
    avg = jax.pmap(lambda x: lax.pmean(x, 'x'), 'x')
    new_variables = state.variables.copy({'batch_stats': avg(state.variables['batch_stats'])})
    if state.ema is not None:
        new_ema_variables = state.ema.variables.copy(
            {'batch_stats': avg(state.ema.variables['batch_stats'])})
        return state.replace(variables=new_variables, ema=state.ema.replace(variables=new_ema_variables))
    else:
        return state.replace(variables=new_variables)
def train_and_evaluate(config: ml_collections.ConfigDict, resume: str):
    """Execute model training and evaluation loop.

    Args:
        config: Hyperparameter configuration for training and evaluation.
        resume: Resume from checkpoints at specified dir if set (TODO: support specific checkpoint file/step)
    """
    rng = random.PRNGKey(42)

    if config.batch_size % jax.device_count() > 0:
        raise ValueError('Batch size must be divisible by the number of devices')
    local_batch_size = config.batch_size // jax.host_count()

    config.eval_batch_size = config.eval_batch_size or config.batch_size
    if config.eval_batch_size % jax.device_count() > 0:
        raise ValueError('Validation batch size must be divisible by the number of devices')
    local_eval_batch_size = config.eval_batch_size // jax.host_count()

    platform = jax.local_devices()[0].platform
    half_prec = config.half_precision
    if half_prec:
        if platform == 'tpu':
            model_dtype = jnp.bfloat16
        else:
            model_dtype = jnp.float16
    else:
        model_dtype = jnp.float32

    rng, model_create_rng = random.split(rng)
    model, variables = create_model(
        config.model, dtype=model_dtype, drop_rate=config.drop_rate,
        drop_path_rate=config.drop_path_rate, rng=model_create_rng)
    image_size = config.image_size or model.default_cfg['input_size'][-1]

    dataset_builder = tfds.builder(config.dataset, data_dir=config.data_dir)
    train_iter = create_input_iter(
        dataset_builder, local_batch_size, train=True, image_size=image_size,
        augment_name=config.autoaugment, randaug_magnitude=config.randaug_magnitude,
        randaug_num_layers=config.randaug_num_layers, half_precision=half_prec, cache=config.cache)
    eval_iter = create_input_iter(
        dataset_builder, local_eval_batch_size, train=False, image_size=image_size,
        half_precision=half_prec, cache=config.cache)

    steps_per_epoch = dataset_builder.info.splits['train'].num_examples // config.batch_size
    if config.num_train_steps == -1:
        num_steps = steps_per_epoch * config.num_epochs
    else:
        num_steps = config.num_train_steps
    if config.steps_per_eval == -1:
        num_validation_examples = dataset_builder.info.splits['validation'].num_examples
        steps_per_eval = num_validation_examples // config.eval_batch_size
    else:
        steps_per_eval = config.steps_per_eval
    steps_per_checkpoint = steps_per_epoch * 1

    base_lr = config.lr * config.batch_size / 256.0
    lr_fn = create_lr_schedule_epochs(
        base_lr, config.lr_schedule, steps_per_epoch=steps_per_epoch,
        total_epochs=config.num_epochs, decay_rate=config.lr_decay_rate,
        decay_epochs=config.lr_decay_epochs, warmup_epochs=config.lr_warmup_epochs,
        min_lr=config.lr_minimum)

    state = create_train_state(config, variables, lr_fn)
    if resume:
        state = restore_checkpoint(state, resume)
    step_offset = int(state.step)
    state = flax.jax_utils.replicate(state)

    p_train_step = jax.pmap(
        functools.partial(
            train_step, model.apply, lr_fn=lr_fn,
            label_smoothing=config.label_smoothing, weight_decay=config.weight_decay),
        axis_name='batch')
    p_eval_step = jax.pmap(functools.partial(eval_step, model.apply), axis_name='batch')
    p_eval_step_ema = None
    if config.ema_decay != 0.0:
        p_eval_step_ema = jax.pmap(functools.partial(eval_step_ema, model.apply), axis_name='batch')

    if jax.host_id() == 0:
        if resume and step_offset > 0:
            output_dir = resume
        else:
            output_base = config.output_base_dir if config.output_base_dir else './output'
            exp_name = '-'.join([datetime.now().strftime('%Y%m%d-%H%M%S'), config.model])
            output_dir = get_outdir(output_base, exp_name)
        summary_writer = tensorboard.SummaryWriter(output_dir)
        summary_writer.hparams(dict(config))
    epoch_metrics = []
    t_loop_start = time.time()
    num_samples = 0
    for step, batch in zip(range(step_offset, num_steps), train_iter):
        step_p1 = step + 1
        rng, step_rng = random.split(rng)
        sharded_rng = common_utils.shard_prng_key(step_rng)
        num_samples += config.batch_size
        state, metrics = p_train_step(state, batch, dropout_rng=sharded_rng)
        epoch_metrics.append(metrics)
        if step_p1 % steps_per_epoch == 0:
            epoch = step // steps_per_epoch
            epoch_metrics = common_utils.get_metrics(epoch_metrics)
            summary = jax.tree_map(lambda x: x.mean(), epoch_metrics)
            samples_per_sec = num_samples / (time.time() - t_loop_start)
            logging.info(
                'train epoch: %d, loss: %.4f, img/sec %.2f, top1: %.2f, top5: %.3f',
                epoch, summary['loss'], samples_per_sec, summary['top1'], summary['top5'])
            if jax.host_id() == 0:
                for key, vals in epoch_metrics.items():
                    tag = 'train_%s' % key
                    for i, val in enumerate(vals):
                        summary_writer.scalar(tag, val, (step_p1 - len(vals)) + i)
                summary_writer.scalar('samples per second', samples_per_sec, step)
            epoch_metrics = []

            # Sync batch stats across replicas before evaluating.
            state = sync_batch_stats(state)
            eval_metrics = []
            for step_eval in range(steps_per_eval):
                eval_batch = next(eval_iter)
                metrics = p_eval_step(state, eval_batch)
                eval_metrics.append(metrics)
            eval_metrics = common_utils.get_metrics(eval_metrics)
            summary = jax.tree_map(lambda x: x.mean(), eval_metrics)
            logging.info(
                'eval epoch: %d, loss: %.4f, top1: %.2f, top5: %.3f',
                epoch, summary['loss'], summary['top1'], summary['top5'])

            if p_eval_step_ema is not None:
                # Re-run evaluation with the EMA weights.
                eval_metrics = []
                for step_eval in range(steps_per_eval):
                    eval_batch = next(eval_iter)
                    metrics = p_eval_step_ema(state, eval_batch)
                    eval_metrics.append(metrics)
                eval_metrics = common_utils.get_metrics(eval_metrics)
                summary = jax.tree_map(lambda x: x.mean(), eval_metrics)
                logging.info(
                    'eval epoch ema: %d, loss: %.4f, top1: %.2f, top5: %.3f',
                    epoch, summary['loss'], summary['top1'], summary['top5'])

            if jax.host_id() == 0:
                for key, val in eval_metrics.items():
                    tag = 'eval_%s' % key
                    summary_writer.scalar(tag, val.mean(), step)
                summary_writer.flush()
            t_loop_start = time.time()
            num_samples = 0
        elif step_p1 % 100 == 0:
            summary = jax.tree_map(lambda x: x.mean(), common_utils.get_metrics(epoch_metrics))
            samples_per_sec = num_samples / (time.time() - t_loop_start)
            logging.info(
                'train steps: %d, loss: %.4f, img/sec: %.2f',
                step_p1, summary['loss'], samples_per_sec)

        if step_p1 % steps_per_checkpoint == 0 or step_p1 == num_steps:
            state = sync_batch_stats(state)
            save_checkpoint(state, output_dir)

    # Wait until all computations are done before exiting.
    jax.random.normal(jax.random.PRNGKey(0), ()).block_until_ready()
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    train_and_evaluate(config=FLAGS.config, resume=FLAGS.resume)
def validate(args):
    rng = jax.random.PRNGKey(0)
    platform = jax.local_devices()[0].platform
    if args.half_precision:
        if platform == 'tpu':
            model_dtype = jax.numpy.bfloat16
        else:
            model_dtype = jax.numpy.float16
    else:
        model_dtype = jax.numpy.float32

    model, variables = create_model(args.model, pretrained=True, dtype=model_dtype, rng=rng)
    print(f'Created {args.model} model. Validating...')

    if args.no_jit:
        eval_step = lambda images, labels: eval_forward(model.apply, variables, images, labels)
    else:
        eval_step = jax.jit(lambda images, labels: eval_forward(model.apply, variables, images, labels))

    # Runs evaluation and returns top-1 / top-5 accuracy.
    image_size = model.default_cfg['input_size'][-1]
    eval_iter, num_batches = create_eval_iter(
        args.data, args.batch_size, image_size,
        half_precision=args.half_precision,
        mean=tuple([x * 255 for x in model.default_cfg['mean']]),
        std=tuple([x * 255 for x in model.default_cfg['std']]),
        interpolation=model.default_cfg['interpolation'])

    batch_time = AverageMeter()
    correct_top1, correct_top5 = 0, 0
    total_examples = 0
    start_time = prev_time = time.time()
    for batch_index, batch in enumerate(eval_iter):
        images, labels = batch['image'], batch['label']
        top1_count, top5_count = eval_step(images, labels)
        correct_top1 += int(top1_count)
        correct_top5 += int(top5_count)
        total_examples += images.shape[0]
        batch_time.update(time.time() - prev_time)
        if batch_index % 20 == 0 and batch_index > 0:
            print(
                f'Test: [{batch_index:>4d}/{num_batches}] '
                f'Rate: {images.shape[0] / batch_time.val:>5.2f}/s ({images.shape[0] / batch_time.avg:>5.2f}/s) '
                f'Acc@1: {100 * correct_top1 / total_examples:>7.3f} '
                f'Acc@5: {100 * correct_top5 / total_examples:>7.3f}')
        prev_time = time.time()

    acc_1 = 100 * correct_top1 / total_examples
    acc_5 = 100 * correct_top5 / total_examples
    print(
        f'Validation complete. {total_examples / (prev_time - start_time):>5.2f} img/s. '
        f'Acc@1 {acc_1:>7.3f}, Acc@5 {acc_5:>7.3f}')
    return dict(top1=acc_1, top5=acc_5)
def prepare_tf_data(xs):
    def _prepare(x):
        x = x._numpy()
        return x

    return jax.tree_map(_prepare, xs)
def create_eval_iter(data_dir, batch_size, image_size, dataset_name='imagenet2012:5.0.0',
                     half_precision=False, mean=None, std=None, interpolation='bicubic'):
    dataset_builder = tfds.builder(dataset_name, data_dir=data_dir)
    assert dataset_builder.info.splits['validation'].num_examples % batch_size == 0
    num_batches = dataset_builder.info.splits['validation'].num_examples // batch_size
    ds = input_pipeline.create_split(
        dataset_builder, batch_size, train=False, half_precision=half_precision,
        image_size=image_size, mean=mean, std=std, interpolation=interpolation, no_repeat=True)
    it = map(prepare_tf_data, ds)
    return it, num_batches
def eval_forward(apply_fn, variables, images, labels):
    logits = apply_fn(variables, images, mutable=False, training=False)
    top1_count, top5_count = correct_topk(logits, labels, topk=(1, 5))
    return top1_count, top5_count
def main():
    args = parser.parse_args()
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)

    if get_model_cfg(args.model) is not None:
        validate(args)
    else:
        models = list_models(pretrained=True)
        if args.model != 'all':
            models = fnmatch.filter(models, args.model)
        if not models:
            print(f'ERROR: No models found to validate with pattern {args.model}.')
            exit(1)
        print('Validating: ', ', '.join(models))
        results = []
        for m in models:
            args.model = m
            res = validate(args)
            res.update(dict(model=m))
            results.append(res)
        print('Results:')
        for r in results:
            print(f"Model: {r['model']}, Top1: {r['top1']}, Top5: {r['top5']}")
def validate(args):
    model = create_model(args.model, pretrained=True)
    print(f'Created {args.model} model. Validating...')

    eval_step = objax.Jit(
        lambda images, labels: eval_forward(model, images, labels), model.vars())

    # Runs evaluation and returns top-1 / top-5 accuracy.
    image_size = model.default_cfg['input_size'][-1]
    test_ds, num_batches = imagenet_data.load(
        imagenet_data.Split.TEST,
        is_training=False,
        image_size=image_size,
        batch_dims=[args.batch_size],
        chw=True,
        mean=tuple([x * 255 for x in model.default_cfg['mean']]),
        std=tuple([x * 255 for x in model.default_cfg['std']]),
        tfds_data_dir=args.data)

    batch_time = AverageMeter()
    correct_top1, correct_top5 = 0, 0
    total_examples = 0
    start_time = prev_time = time.time()
    for batch_index, batch in enumerate(test_ds):
        images, labels = batch['images'], batch['labels']
        top1_count, top5_count = eval_step(images, labels)
        correct_top1 += int(top1_count)
        correct_top5 += int(top5_count)
        total_examples += images.shape[0]
        batch_time.update(time.time() - prev_time)
        if batch_index % 20 == 0 and batch_index > 0:
            print(
                f'Test: [{batch_index:>4d}/{num_batches}] '
                f'Rate: {images.shape[0] / batch_time.val:>5.2f}/s ({images.shape[0] / batch_time.avg:>5.2f}/s) '
                f'Acc@1: {100 * correct_top1 / total_examples:>7.3f} '
                f'Acc@5: {100 * correct_top5 / total_examples:>7.3f}')
        prev_time = time.time()

    acc_1 = 100 * correct_top1 / total_examples
    acc_5 = 100 * correct_top5 / total_examples
    print(
        f'Validation complete. {total_examples / (prev_time - start_time):>5.2f} img/s. '
        f'Acc@1 {acc_1:>7.3f}, Acc@5 {acc_5:>7.3f}')
    return dict(top1=float(acc_1), top5=float(acc_5))
def eval_forward(model, images, labels):
    logits = model(images, training=False)
    top1_count, top5_count = correct_topk(logits, labels, topk=(1, 5))
    return top1_count, top5_count
def main():
    args = parser.parse_args()
    logging.set_verbosity(logging.ERROR)
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)

    if get_model_cfg(args.model) is not None:
        validate(args)
    else:
        models = list_models(pretrained=True)
        if args.model != 'all':
            models = fnmatch.filter(models, args.model)
        if not models:
            print(f'ERROR: No models found to validate with pattern ({args.model}).')
            exit(1)
        print('Validating:', ', '.join(models))
        results = []
        for m in models:
            args.model = m
            res = validate(args)
            res.update(dict(model=m))
            results.append(res)
        print('Results:')
        for r in results:
            print(f"Model: {r['model']}, Top1: {r['top1']}, Top5: {r['top5']}")
def get_config():
    """Get the default hyperparameter configuration."""
    config = ml_collections.ConfigDict()
    config.output_base_dir = ''
    config.data_dir = '/data/'
    config.dataset = 'imagenet2012:5.0.0'
    config.num_classes = 1000

    config.model = 'tf_efficientnet_b0'
    config.image_size = 0  # 0 means use the model's default input size
    config.batch_size = 224
    config.eval_batch_size = 100
    config.lr = 0.016
    config.label_smoothing = 0.1
    config.weight_decay = 1e-05
    config.ema_decay = 0.99997

    config.opt = 'rmsproptf'
    config.opt_eps = 0.001
    config.opt_momentum = 0.9
    config.opt_decay = 0.9
    config.opt_weight_decay = 0.0

    config.lr_schedule = 'step'
    config.lr_decay_rate = 0.97
    config.lr_decay_epochs = 2.4
    config.lr_warmup_epochs = 5.0
    config.lr_minimum = 1e-06
    config.num_epochs = 450

    config.autoaugment = None
    config.randaug_magnitude = 10
    config.randaug_num_layers = 2
    config.cache = False
    config.half_precision = True

    config.drop_rate = 0.2
    config.drop_path_rate = 0.2

    # -1 means derive the step counts from num_epochs / the validation set size.
    config.num_train_steps = -1
    config.steps_per_eval = -1
    return config
def get_config():
    config = default_lib.get_config()

    config.model = 'pt_efficientnet_b3'
    config.batch_size = 2048
    config.eval_batch_size = 1000
    config.ema_decay = 0.9999
    config.num_epochs = 550
    config.drop_rate = 0.3
    return config
def get_config():
    config = default_lib.get_config()

    config.batch_size = 500
    return config
def checkpoint_metric(checkpoint_path):
    if not checkpoint_path or not os.path.isfile(checkpoint_path):
        return {}
    print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    metric = None
    if 'metric' in checkpoint:
        metric = checkpoint['metric']
    return metric
def main():
    args = parser.parse_args()
    args.use_ema = not args.no_use_ema
    args.sort = not args.no_sort

    if os.path.exists(args.output):
        print('Error: Output filename ({}) already exists.'.format(args.output))
        exit(1)

    pattern = args.input
    if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep):
        pattern += os.path.sep
    pattern += args.filter
    checkpoints = glob.glob(pattern, recursive=True)
    if not checkpoints:
        print('Error: No checkpoints to average.')
        exit(1)

    if args.sort:
        checkpoint_metrics = []
        for c in checkpoints:
            metric = checkpoint_metric(c)
            if metric is not None:
                checkpoint_metrics.append((metric, c))
        checkpoint_metrics = list(sorted(checkpoint_metrics, reverse=not args.descending))
        checkpoint_metrics = checkpoint_metrics[:args.n]
        print('Selected checkpoints:')
        [print(m, c) for m, c in checkpoint_metrics]
        avg_checkpoints = [c for m, c in checkpoint_metrics]
    else:
        avg_checkpoints = checkpoints
        print('Selected checkpoints:')
        [print(c) for c in checkpoints]

    avg_state_dict = {}
    avg_counts = {}
    for c in avg_checkpoints:
        new_state_dict = load_state_dict(c, args.use_ema)
        if not new_state_dict:
            print("Error: Checkpoint ({}) doesn't exist".format(c))
            continue
        for k, v in new_state_dict.items():
            if k not in avg_state_dict:
                avg_state_dict[k] = v.clone().to(dtype=torch.float64)
                avg_counts[k] = 1
            else:
                avg_state_dict[k] += v.to(dtype=torch.float64)
                avg_counts[k] += 1

    for k, v in avg_state_dict.items():
        v.div_(avg_counts[k])

    # Clamp to the float32 range before downcasting the float64 accumulator.
    float32_info = torch.finfo(torch.float32)
    final_state_dict = {}
    for k, v in avg_state_dict.items():
        v = v.clamp(float32_info.min, float32_info.max)
        final_state_dict[k] = v.to(dtype=torch.float32)

    try:
        torch.save(final_state_dict, args.output, _use_new_zipfile_serialization=False)
    except Exception:
        torch.save(final_state_dict, args.output)

    with open(args.output, 'rb') as f:
        sha_hash = hashlib.sha256(f.read()).hexdigest()
    print("=> Saved state_dict to '{}', SHA256: {}".format(args.output, sha_hash))
def main():
    args = parser.parse_args()
    if os.path.exists(args.output):
        print('Error: Output filename ({}) already exists.'.format(args.output))
        exit(1)

    if args.checkpoint and os.path.isfile(args.checkpoint):
        print("=> Loading checkpoint '{}'".format(args.checkpoint))
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        new_state_dict = OrderedDict()
        if isinstance(checkpoint, dict):
            state_dict_key = 'state_dict_ema' if args.use_ema else 'state_dict'
            if state_dict_key in checkpoint:
                state_dict = checkpoint[state_dict_key]
            else:
                state_dict = checkpoint
        else:
            assert False
        for k, v in state_dict.items():
            if args.clean_aux_bn and 'aux_bn' in k:
                continue
            name = k[7:] if k.startswith('module') else k  # strip DataParallel 'module.' prefix
            new_state_dict[name] = v
        print("=> Loaded state_dict from '{}'".format(args.checkpoint))

        try:
            torch.save(new_state_dict, _TEMP_NAME, _use_new_zipfile_serialization=False)
        except Exception:
            torch.save(new_state_dict, _TEMP_NAME)

        with open(_TEMP_NAME, 'rb') as f:
            sha_hash = hashlib.sha256(f.read()).hexdigest()

        if args.output:
            checkpoint_root, checkpoint_base = os.path.split(args.output)
            checkpoint_base = os.path.splitext(checkpoint_base)[0]
        else:
            checkpoint_root = ''
            checkpoint_base = os.path.splitext(args.checkpoint)[0]
        final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + '.pth'
        shutil.move(_TEMP_NAME, os.path.join(checkpoint_root, final_filename))
        print("=> Saved state_dict to '{}', SHA256: {}".format(final_filename, sha_hash))
    else:
        print("Error: Checkpoint ({}) doesn't exist".format(args.checkpoint))
def _post_process(
        cls_outputs: List[torch.Tensor],
        box_outputs: List[torch.Tensor],
        num_levels: int,
        num_classes: int,
        max_detection_points: int = 5000,
):
    """Selects top-k predictions.

    Post-proc code adapted from Tensorflow version at: https://github.com/google/automl/tree/master/efficientdet
    and optimized for PyTorch.

    Args:
        cls_outputs: a list (one tensor per level) of classification logits.

        box_outputs: a list (one tensor per level) of box regression targets.

        num_levels (int): number of feature levels

        num_classes (int): number of output classes

        max_detection_points (int): number of candidate detections kept by the top-k selection
    """
    batch_size = cls_outputs[0].shape[0]
    cls_outputs_all = torch.cat([
        cls_outputs[level].permute(0, 2, 3, 1).reshape([batch_size, -1, num_classes])
        for level in range(num_levels)], 1)
    box_outputs_all = torch.cat([
        box_outputs[level].permute(0, 2, 3, 1).reshape([batch_size, -1, 4])
        for level in range(num_levels)], 1)

    _, cls_topk_indices_all = torch.topk(
        cls_outputs_all.reshape(batch_size, -1), dim=1, k=max_detection_points)
    # Each flat top-k index encodes (anchor, class) as anchor * num_classes + class.
    indices_all = cls_topk_indices_all // num_classes
    classes_all = cls_topk_indices_all % num_classes

    box_outputs_all_after_topk = torch.gather(
        box_outputs_all, 1, indices_all.unsqueeze(2).expand(-1, -1, 4))
    cls_outputs_all_after_topk = torch.gather(
        cls_outputs_all, 1, indices_all.unsqueeze(2).expand(-1, -1, num_classes))
    cls_outputs_all_after_topk = torch.gather(
        cls_outputs_all_after_topk, 2, classes_all.unsqueeze(2))

    return cls_outputs_all_after_topk, box_outputs_all_after_topk, indices_all, classes_all
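# Index decomposition sketch: with num_classes=90, a flat top-k index of 1234
# selects anchor 1234 // 90 == 13 and class 1234 % 90 == 64; the gathers above
# then pull the matching box regression and the single class logit per detection.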
@torch.jit.script
def _batch_detection(
        batch_size: int, class_out, box_out, anchor_boxes, indices, classes,
        img_scale: Optional[torch.Tensor] = None,
        img_size: Optional[torch.Tensor] = None,
        max_det_per_image: int = 100,
        soft_nms: bool = False,
):
    batch_detections = []
    for i in range(batch_size):
        img_scale_i = None if img_scale is None else img_scale[i]
        img_size_i = None if img_size is None else img_size[i]
        detections = generate_detections(
            class_out[i], box_out[i], anchor_boxes, indices[i], classes[i],
            img_scale_i, img_size_i, max_det_per_image=max_det_per_image, soft_nms=soft_nms)
        batch_detections.append(detections)
    return torch.stack(batch_detections, dim=0)
class DetBenchPredict(nn.Module):
    def __init__(self, model):
        super(DetBenchPredict, self).__init__()
        self.model = model
        self.config = model.config
        self.num_levels = model.config.num_levels
        self.num_classes = model.config.num_classes
        self.anchors = Anchors.from_config(model.config)
        self.max_detection_points = model.config.max_detection_points
        self.max_det_per_image = model.config.max_det_per_image
        self.soft_nms = model.config.soft_nms

    def forward(self, x, img_info: Optional[Dict[str, torch.Tensor]] = None):
        class_out, box_out = self.model(x)
        class_out, box_out, indices, classes = _post_process(
            class_out, box_out,
            num_levels=self.num_levels, num_classes=self.num_classes,
            max_detection_points=self.max_detection_points)
        if img_info is None:
            img_scale, img_size = None, None
        else:
            img_scale, img_size = img_info['img_scale'], img_info['img_size']
        return _batch_detection(
            x.shape[0], class_out, box_out, self.anchors.boxes, indices, classes,
            img_scale, img_size, max_det_per_image=self.max_det_per_image, soft_nms=self.soft_nms)
class DetBenchTrain(nn.Module):
    def __init__(self, model, create_labeler=True):
        super(DetBenchTrain, self).__init__()
        self.model = model
        self.config = model.config
        self.num_levels = model.config.num_levels
        self.num_classes = model.config.num_classes
        self.anchors = Anchors.from_config(model.config)
        self.max_detection_points = model.config.max_detection_points
        self.max_det_per_image = model.config.max_det_per_image
        self.soft_nms = model.config.soft_nms
        self.anchor_labeler = None
        if create_labeler:
            self.anchor_labeler = AnchorLabeler(self.anchors, self.num_classes, match_threshold=0.5)
        self.loss_fn = DetectionLoss(model.config)

    def forward(self, x, target: Dict[str, torch.Tensor]):
        class_out, box_out = self.model(x)
        if self.anchor_labeler is None:
            # Targets were pre-labeled upstream (e.g. in the dataloader pipeline).
            assert 'label_num_positives' in target
            cls_targets = [target[f'label_cls_{l}'] for l in range(self.num_levels)]
            box_targets = [target[f'label_bbox_{l}'] for l in range(self.num_levels)]
            num_positives = target['label_num_positives']
        else:
            cls_targets, box_targets, num_positives = self.anchor_labeler.batch_label_anchors(
                target['bbox'], target['cls'])

        loss, class_loss, box_loss = self.loss_fn(
            class_out, box_out, cls_targets, box_targets, num_positives)
        output = {'loss': loss, 'class_loss': class_loss, 'box_loss': box_loss}
        if not self.training:
            # In eval mode, also produce decoded detections for mAP evaluation.
            class_out_pp, box_out_pp, indices, classes = _post_process(
                class_out, box_out,
                num_levels=self.num_levels, num_classes=self.num_classes,
                max_detection_points=self.max_detection_points)
            output['detections'] = _batch_detection(
                x.shape[0], class_out_pp, box_out_pp, self.anchors.boxes, indices, classes,
                target['img_scale'], target['img_size'],
                max_det_per_image=self.max_det_per_image, soft_nms=self.soft_nms)
        return output
def unwrap_bench(model):
    # Recursively unwrap DistributedDataParallel/DataParallel ('module') and
    # train/predict bench wrappers ('model') to reach the underlying network.
    if hasattr(model, 'module'):
        return unwrap_bench(model.module)
    elif hasattr(model, 'model'):
        return unwrap_bench(model.model)
    else:
        return model
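# Usage sketch (hypothetical model variable): peel off wrappers until the bare model remains.
# bench = DetBenchTrain(my_effdet_model)
# assert unwrap_bench(bench) is my_effdet_model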
def set_config_readonly(conf):
    OmegaConf.set_readonly(conf, True)


def set_config_writeable(conf):
    OmegaConf.set_readonly(conf, False)
def bifpn_config(min_level, max_level, weight_method=None):
    """BiFPN config.
    Adapted from https://github.com/google/automl/blob/56815c9986ffd4b508fe1d68508e268d129715c1/efficientdet/keras/fpn_configs.py
    """
    p = OmegaConf.create()
    weight_method = weight_method or 'fastattn'

    num_levels = max_level - min_level + 1
    node_ids = {min_level + i: [i] for i in range(num_levels)}

    level_last_id = lambda level: node_ids[level][-1]
    level_all_ids = lambda level: node_ids[level]
    id_cnt = itertools.count(num_levels)

    p.nodes = []
    for i in range(max_level - 1, min_level - 1, -1):
        # top-down path
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': [level_last_id(i), level_last_id(i + 1)],
            'weight_method': weight_method,
        })
        node_ids[i].append(next(id_cnt))

    for i in range(min_level + 1, max_level + 1):
        # bottom-up path
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
            'weight_method': weight_method,
        })
        node_ids[i].append(next(id_cnt))
    return p
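# Layout sketch: for min_level=3, max_level=7 (P3..P7) the loops above create
# 4 top-down nodes (P6..P3) followed by 4 bottom-up nodes (P4..P7), i.e.
# len(bifpn_config(3, 7).nodes) == 8, matching one BiFPN cell.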
def panfpn_config(min_level, max_level, weight_method=None):
    """PAN FPN config.

    This defines FPN layout from Path Aggregation Networks as an alternate to
    BiFPN, it does not implement the full PAN spec.

    Paper: https://arxiv.org/abs/1803.01534
    """
    p = OmegaConf.create()
    weight_method = weight_method or 'fastattn'

    num_levels = max_level - min_level + 1
    node_ids = {min_level + i: [i] for i in range(num_levels)}
    level_last_id = lambda level: node_ids[level][-1]
    id_cnt = itertools.count(num_levels)

    p.nodes = []
    for i in range(max_level, min_level - 1, -1):
        # top-down path
        offsets = [level_last_id(i), level_last_id(i + 1)] if i != max_level else [level_last_id(i)]
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': offsets,
            'weight_method': weight_method,
        })
        node_ids[i].append(next(id_cnt))

    for i in range(min_level, max_level + 1):
        # bottom-up path
        offsets = [level_last_id(i), level_last_id(i - 1)] if i != min_level else [level_last_id(i)]
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': offsets,
            'weight_method': weight_method,
        })
        node_ids[i].append(next(id_cnt))
    return p
def qufpn_config(min_level, max_level, weight_method=None):
    """A dynamic quad fpn config that can adapt to different min/max levels.

    It extends the idea of BiFPN, and has four paths:
        (up_down -> bottom_up) + (bottom_up -> up_down).

    Paper: https://ieeexplore.ieee.org/document/9225379
    Ref code: From contribution to TF EfficientDet
    https://github.com/google/automl/blob/eb74c6739382e9444817d2ad97c4582dbe9a9020/efficientdet/keras/fpn_configs.py
    """
    p = OmegaConf.create()
    weight_method = weight_method or 'fastattn'
    quad_method = 'fastattn'

    num_levels = max_level - min_level + 1
    node_ids = {min_level + i: [i] for i in range(num_levels)}
    level_last_id = lambda level: node_ids[level][-1]
    level_all_ids = lambda level: node_ids[level]
    level_first_id = lambda level: node_ids[level][0]
    id_cnt = itertools.count(num_levels)

    p.nodes = []
    for i in range(max_level - 1, min_level - 1, -1):
        # top-down path 1
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': [level_last_id(i), level_last_id(i + 1)],
            'weight_method': weight_method,
        })
        node_ids[i].append(next(id_cnt))
    node_ids[max_level].append(node_ids[max_level][-1])

    for i in range(min_level + 1, max_level):
        # bottom-up path 2
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
            'weight_method': weight_method,
        })
        node_ids[i].append(next(id_cnt))

    i = max_level
    p.nodes.append({
        'feat_level': i,
        'inputs_offsets': [level_first_id(i)] + [level_last_id(i - 1)],
        'weight_method': weight_method,
    })
    node_ids[i].append(next(id_cnt))
    node_ids[min_level].append(node_ids[min_level][-1])

    for i in range(min_level + 1, max_level + 1, 1):
        # bottom-up path 3
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': [
                level_first_id(i),
                level_last_id(i - 1) if i != min_level + 1 else level_first_id(i - 1)],
            'weight_method': weight_method,
        })
        node_ids[i].append(next(id_cnt))
    node_ids[min_level].append(node_ids[min_level][-1])

    for i in range(max_level - 1, min_level, -1):
        # top-down path 4
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': [node_ids[i][0]] + [node_ids[i][-1]] + [level_last_id(i + 1)],
            'weight_method': weight_method,
        })
        node_ids[i].append(next(id_cnt))

    i = min_level
    p.nodes.append({
        'feat_level': i,
        'inputs_offsets': [node_ids[i][0]] + [level_last_id(i + 1)],
        'weight_method': weight_method,
    })
    node_ids[i].append(next(id_cnt))
    node_ids[max_level].append(node_ids[max_level][-1])

    # Final fusion of the path outputs per level.
    for i in range(min_level, max_level + 1):
        p.nodes.append({
            'feat_level': i,
            'inputs_offsets': [node_ids[i][2], node_ids[i][4]],
            'weight_method': quad_method,
        })
        node_ids[i].append(next(id_cnt))
    return p
def get_fpn_config(fpn_name, min_level=3, max_level=7):
    if not fpn_name:
        fpn_name = 'bifpn_fa'
    name_to_config = {
        'bifpn_sum': bifpn_config(min_level=min_level, max_level=max_level, weight_method='sum'),
        'bifpn_attn': bifpn_config(min_level=min_level, max_level=max_level, weight_method='attn'),
        'bifpn_fa': bifpn_config(min_level=min_level, max_level=max_level, weight_method='fastattn'),
        'pan_sum': panfpn_config(min_level=min_level, max_level=max_level, weight_method='sum'),
        'pan_fa': panfpn_config(min_level=min_level, max_level=max_level, weight_method='fastattn'),
        'qufpn_sum': qufpn_config(min_level=min_level, max_level=max_level, weight_method='sum'),
        'qufpn_fa': qufpn_config(min_level=min_level, max_level=max_level, weight_method='fastattn'),
    }
    return name_to_config[fpn_name]
def default_detection_model_configs():
    """Returns the default detection model configs."""
    h = OmegaConf.create()

    h.name = 'tf_efficientdet_d1'
    h.backbone_name = 'tf_efficientnet_b1'
    h.backbone_args = None
    h.backbone_indices = None
    h.image_size = (640, 640)
    h.num_classes = 90

    # feature level and anchor config
    h.min_level = 3
    h.max_level = 7
    h.num_levels = h.max_level - h.min_level + 1
    h.num_scales = 3
    h.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
    h.anchor_scale = 4.0

    # FPN and head config
    h.pad_type = 'same'
    h.act_type = 'swish'
    h.norm_layer = None
    h.norm_kwargs = dict(eps=0.001, momentum=0.01)
    h.box_class_repeats = 3
    h.fpn_cell_repeats = 3
    h.fpn_channels = 88
    h.separable_conv = True
    h.apply_resample_bn = True
    h.conv_bn_relu_pattern = False
    h.downsample_type = 'max'
    h.upsample_type = 'nearest'
    h.redundant_bias = True
    h.head_bn_level_first = False
    h.head_act_type = None
    h.fpn_name = None
    h.fpn_config = None
    h.fpn_drop_path_rate = 0.0

    # classification (focal) loss
    h.alpha = 0.25
    h.gamma = 1.5
    h.label_smoothing = 0.0
    h.legacy_focal = False
    h.jit_loss = False

    # localization loss
    h.delta = 0.1
    h.box_loss_weight = 50.0

    # nms
    h.soft_nms = False
    h.max_detection_points = 5000
    h.max_det_per_image = 100
    return h
def get_efficientdet_config(model_name='tf_efficientdet_d1'):
    """Get the default config for EfficientDet based on model name."""
    h = default_detection_model_configs()
    h.update(efficientdet_model_param_dict[model_name])
    h.num_levels = h.max_level - h.min_level + 1
    h = deepcopy(h)  # copy so callers can mutate without touching the shared param dict
    return h
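# Usage sketch: configs can be fetched and tweaked before model construction.
# config = get_efficientdet_config('tf_efficientdet_d1')
# config.num_classes = 20   # e.g. adapting to a smaller label set (hypothetical)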
def default_detection_train_config():
    h = OmegaConf.create()

    h.skip_crowd_during_training = True
    h.input_rand_hflip = True
    h.train_scale_min = 0.1
    h.train_scale_max = 2.0
    h.autoaugment_policy = None

    h.momentum = 0.9
    h.learning_rate = 0.08
    h.lr_warmup_init = 0.008
    h.lr_warmup_epoch = 1.0
    h.first_lr_drop_epoch = 200.0
    h.second_lr_drop_epoch = 250.0
    h.clip_gradients_norm = 10.0
    h.num_epochs = 300
    h.weight_decay = 4e-05
    h.lr_decay_method = 'cosine'
    h.moving_average_decay = 0.9998
    h.ckpt_var_scope = None
    return h
class DetectionDatset(data.Dataset):
    """Object Detection Dataset. Use with parsers for COCO, VOC, and OpenImages.

    Args:
        parser (string, Parser): parser name or instance used to load annotations
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
    """

    def __init__(self, data_dir, parser=None, parser_kwargs=None, transform=None):
        super(DetectionDatset, self).__init__()
        parser_kwargs = parser_kwargs or {}
        self.data_dir = data_dir
        if isinstance(parser, str):
            self._parser = create_parser(parser, **parser_kwargs)
        else:
            assert parser is not None and len(parser.img_ids)
            self._parser = parser
        self._transform = transform

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, annotations (target)).
        """
        img_info = self._parser.img_infos[index]
        target = dict(img_idx=index, img_size=(img_info['width'], img_info['height']))
        if self._parser.has_labels:
            ann = self._parser.get_ann_info(index)
            target.update(ann)

        img_path = self.data_dir / img_info['file_name']
        img = Image.open(img_path).convert('RGB')
        if self.transform is not None:
            img, target = self.transform(img, target)

        return img, target

    def __len__(self):
        return len(self._parser.img_ids)

    @property
    def parser(self):
        return self._parser

    @property
    def transform(self):
        return self._transform

    @transform.setter
    def transform(self, t):
        self._transform = t
class SkipSubset(data.Dataset):
    """Subset of a dataset at specified indices.

    Arguments:
        dataset (Dataset): The whole Dataset
        n (int): skip rate (select every nth)
    """

    def __init__(self, dataset, n=2):
        self.dataset = dataset
        assert n >= 1
        self.indices = np.arange(len(dataset))[::n]

    def __getitem__(self, idx):
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)

    @property
    def parser(self):
        return self.dataset.parser

    @property
    def transform(self):
        return self.dataset.transform

    @transform.setter
    def transform(self, t):
        self.dataset.transform = t
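# Behavior sketch: SkipSubset(ds, n=2) keeps indices 0, 2, 4, ..., so a
# 5000-image dataset yields len == 2500; n=1 keeps every sample.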
@dataclass
class CocoCfg:
    variant: str = None
    parser: str = 'coco'
    num_classes: int = 80
    splits: Dict[str, dict] = None


@dataclass
class Coco2017Cfg(CocoCfg):
    variant: str = '2017'
    splits: Dict[str, dict] = field(default_factory=lambda: dict(
        train=dict(ann_filename='annotations/instances_train2017.json', img_dir='train2017', has_labels=True),
        val=dict(ann_filename='annotations/instances_val2017.json', img_dir='val2017', has_labels=True),
        test=dict(ann_filename='annotations/image_info_test2017.json', img_dir='test2017', has_labels=False),
        testdev=dict(ann_filename='annotations/image_info_test-dev2017.json', img_dir='test2017', has_labels=False),
    ))


@dataclass
class Coco2014Cfg(CocoCfg):
    variant: str = '2014'
    splits: Dict[str, dict] = field(default_factory=lambda: dict(
        train=dict(ann_filename='annotations/instances_train2014.json', img_dir='train2014', has_labels=True),
        val=dict(ann_filename='annotations/instances_val2014.json', img_dir='val2014', has_labels=True),
        test=dict(ann_filename='', img_dir='test2014', has_labels=False),
    ))
@dataclass
class VocCfg:
    variant: str = None
    parser: str = 'voc'
    num_classes: int = 80
    img_filename: str = '%s.jpg'
    splits: Dict[str, dict] = None


@dataclass
class Voc2007Cfg(VocCfg):
    variant: str = '2007'
    splits: Dict[str, dict] = field(default_factory=lambda: dict(
        train=dict(
            split_filename='VOC2007/ImageSets/Main/train.txt',
            ann_filename='VOC2007/Annotations/%s.xml',
            img_dir='VOC2007/JPEGImages'),
        val=dict(
            split_filename='VOC2007/ImageSets/Main/val.txt',
            ann_filename='VOC2007/Annotations/%s.xml',
            img_dir='VOC2007/JPEGImages'),
    ))


@dataclass
class Voc2012Cfg(VocCfg):
    variant: str = '2012'
    splits: Dict[str, dict] = field(default_factory=lambda: dict(
        train=dict(
            split_filename='VOC2012/ImageSets/Main/train.txt',
            ann_filename='VOC2012/Annotations/%s.xml',
            img_dir='VOC2012/JPEGImages'),
        val=dict(
            split_filename='VOC2012/ImageSets/Main/val.txt',
            ann_filename='VOC2012/Annotations/%s.xml',
            img_dir='VOC2012/JPEGImages'),
    ))


@dataclass
class Voc0712Cfg(VocCfg):
    variant: str = '0712'
    splits: Dict[str, dict] = field(default_factory=lambda: dict(
        train=dict(
            split_filename=['VOC2007/ImageSets/Main/trainval.txt', 'VOC2012/ImageSets/Main/trainval.txt'],
            ann_filename=['VOC2007/Annotations/%s.xml', 'VOC2012/Annotations/%s.xml'],
            img_dir=['VOC2007/JPEGImages', 'VOC2012/JPEGImages']),
        val=dict(
            split_filename='VOC2007/ImageSets/Main/test.txt',
            ann_filename='VOC2007/Annotations/%s.xml',
            img_dir='VOC2007/JPEGImages'),
    ))
@dataclass
class OpenImagesCfg:
    variant: str = None
    parser: str = 'openimages'
    num_classes: int = None
    img_filename: str = '%s.jpg'
    splits: Dict[str, dict] = None


@dataclass
class OpenImagesObjCfg(OpenImagesCfg):
    num_classes: int = 601
    categories_map: str = 'annotations/class-descriptions-boxable.csv'


@dataclass
class OpenImagesSegCfg(OpenImagesCfg):
    num_classes: int = 350
    categories_map: str = 'annotations/classes-segmentation.txt'


@dataclass
class OpenImagesObjV5Cfg(OpenImagesObjCfg):
    splits: Dict[str, dict] = field(default_factory=lambda: dict(
        train=dict(
            img_dir='train', img_info='annotations/train-info.csv', has_labels=True, prefix_levels=1,
            ann_bbox='annotations/train-annotations-bbox.csv',
            ann_img_label='annotations/train-annotations-human-imagelabels-boxable.csv'),
        val=dict(
            img_dir='validation', img_info='annotations/validation-info.csv', has_labels=True, prefix_levels=0,
            ann_bbox='annotations/validation-annotations-bbox.csv',
            ann_img_label='annotations/validation-annotations-human-imagelabels-boxable.csv'),
        test=dict(
            img_dir='test', img_info='', has_labels=True, prefix_levels=0,
            ann_bbox='annotations/test-annotations-bbox.csv',
            ann_img_label='annotations/test-annotations-human-imagelabels-boxable.csv'),
    ))


@dataclass
class OpenImagesObjChallenge2019Cfg(OpenImagesObjCfg):
    num_classes: int = 500
    categories_map: str = 'annotations/challenge-2019/challenge-2019-classes-description-500.csv'
    splits: Dict[str, dict] = field(default_factory=lambda: dict(
        train=dict(
            img_dir='train', img_info='annotations/train-info.csv', has_labels=True, prefix_levels=1,
            ann_bbox='annotations/challenge-2019/challenge-2019-train-detection-bbox.csv',
            ann_img_label='annotations/challenge-2019/challenge-2019-train-detection-human-imagelabels.csv'),
        val=dict(
            img_dir='validation', img_info='annotations/validation-info.csv', has_labels=True, prefix_levels=0,
            ann_bbox='annotations/challenge-2019/challenge-2019-validation-detection-bbox.csv',
            ann_img_label='annotations/challenge-2019/challenge-2019-validation-detection-human-imagelabels.csv'),
        test=dict(
            img_dir='challenge2019', img_info='annotations/challenge-2019/challenge2019-info',
            prefix_levels=0, has_labels=False, ann_bbox='', ann_img_label=''),
    ))


@dataclass
class OpenImagesSegV5Cfg(OpenImagesSegCfg):
    num_classes: int = 300
    splits: Dict[str, dict] = field(default_factory=lambda: dict(train=dict(), val=dict(), test=dict()))


@dataclass
class OpenImagesSegChallenge2019Cfg(OpenImagesSegCfg):
    num_classes: int = 300
    ann_class_map: str = 'annotations/challenge-2019/challenge-2019-classes-description-segmentable.csv'
    splits: Dict[str, dict] = field(default_factory=lambda: dict(train=dict(), val=dict(), test=dict()))
def create_dataset(name, root, splits=('train', 'val')):
    if isinstance(splits, str):
        splits = (splits,)
    name = name.lower()
    root = Path(root)
    dataset_cls = DetectionDatset
    datasets = OrderedDict()
    if name.startswith('coco'):
        if 'coco2014' in name:
            dataset_cfg = Coco2014Cfg()
        else:
            dataset_cfg = Coco2017Cfg()
        for s in splits:
            if s not in dataset_cfg.splits:
                raise RuntimeError(f'{s} split not found in config')
            split_cfg = dataset_cfg.splits[s]
            ann_file = root / split_cfg['ann_filename']
            parser_cfg = CocoParserCfg(ann_filename=ann_file, has_labels=split_cfg['has_labels'])
            datasets[s] = dataset_cls(
                data_dir=root / Path(split_cfg['img_dir']),
                parser=create_parser(dataset_cfg.parser, cfg=parser_cfg))
    elif name.startswith('voc'):
        if 'voc0712' in name:
            dataset_cfg = Voc0712Cfg()
        elif 'voc2007' in name:
            dataset_cfg = Voc2007Cfg()
        else:
            dataset_cfg = Voc2012Cfg()
        for s in splits:
            if s not in dataset_cfg.splits:
                raise RuntimeError(f'{s} split not found in config')
            split_cfg = dataset_cfg.splits[s]
            if isinstance(split_cfg['split_filename'], (tuple, list)):
                assert len(split_cfg['split_filename']) == len(split_cfg['ann_filename'])
                parser = None
                for sf, af, im_dir in zip(
                        split_cfg['split_filename'], split_cfg['ann_filename'], split_cfg['img_dir']):
                    parser_cfg = VocParserCfg(
                        split_filename=root / sf,
                        ann_filename=os.path.join(root, af),
                        img_filename=os.path.join(im_dir, dataset_cfg.img_filename))
                    if parser is None:
                        parser = create_parser(dataset_cfg.parser, cfg=parser_cfg)
                    else:
                        other_parser = create_parser(dataset_cfg.parser, cfg=parser_cfg)
                        parser.merge(other=other_parser)
            else:
                parser_cfg = VocParserCfg(
                    split_filename=root / split_cfg['split_filename'],
                    ann_filename=os.path.join(root, split_cfg['ann_filename']),
                    img_filename=os.path.join(split_cfg['img_dir'], dataset_cfg.img_filename))
                parser = create_parser(dataset_cfg.parser, cfg=parser_cfg)
            datasets[s] = dataset_cls(data_dir=root, parser=parser)
    elif name.startswith('openimages'):
        if 'challenge2019' in name:
            dataset_cfg = OpenImagesObjChallenge2019Cfg()
        else:
            dataset_cfg = OpenImagesObjV5Cfg()
        for s in splits:
            if s not in dataset_cfg.splits:
                raise RuntimeError(f'{s} split not found in config')
            split_cfg = dataset_cfg.splits[s]
            parser_cfg = OpenImagesParserCfg(
                categories_filename=root / dataset_cfg.categories_map,
                img_info_filename=root / split_cfg['img_info'],
                bbox_filename=root / split_cfg['ann_bbox'],
                img_label_filename=root / split_cfg['ann_img_label'],
                img_filename=dataset_cfg.img_filename,
                prefix_levels=split_cfg['prefix_levels'],
                has_labels=split_cfg['has_labels'])
            datasets[s] = dataset_cls(
                data_dir=root / Path(split_cfg['img_dir']),
                parser=create_parser(dataset_cfg.parser, cfg=parser_cfg))
    else:
        assert False, f'Unknown dataset parser ({name})'
    datasets = list(datasets.values())
    return datasets if len(datasets) > 1 else datasets[0]
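# Usage sketch (paths hypothetical): request both splits and unpack them, or a
# single split to get a single dataset back.
# train_ds, val_ds = create_dataset('coco2017', '/data/coco')
# val_ds = create_dataset('coco2017', '/data/coco', splits='val')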
def resolve_input_config(args, model_config=None, model=None):
    if not isinstance(args, dict):
        args = vars(args)
    input_config = {}
    if not model_config and model is not None and hasattr(model, 'config'):
        model_config = model.config
    if not model_config:
        model_config = {}  # fall back to the hard defaults below when no model config is available

    # resolve input/image dim
    in_chans = 3
    input_size = (in_chans, 512, 512)
    if 'input_size' in model_config:
        input_size = tuple(model_config['input_size'])
    elif 'image_size' in model_config:
        input_size = (in_chans,) + tuple(model_config['image_size'])
    assert isinstance(input_size, tuple) and len(input_size) == 3
    input_config['input_size'] = input_size

    # resolve interpolation method
    input_config['interpolation'] = 'bicubic'
    if 'interpolation' in args and args['interpolation']:
        input_config['interpolation'] = args['interpolation']
    elif 'interpolation' in model_config:
        input_config['interpolation'] = model_config['interpolation']

    # resolve dataset mean for normalization
    input_config['mean'] = IMAGENET_DEFAULT_MEAN
    if 'mean' in args and args['mean'] is not None:
        mean = tuple(args['mean'])
        if len(mean) == 1:
            mean = tuple(list(mean) * in_chans)
        else:
            assert len(mean) == in_chans
        input_config['mean'] = mean
    elif 'mean' in model_config:
        input_config['mean'] = model_config['mean']

    # resolve dataset std deviation for normalization
    input_config['std'] = IMAGENET_DEFAULT_STD
    if 'std' in args and args['std'] is not None:
        std = tuple(args['std'])
        if len(std) == 1:
            std = tuple(list(std) * in_chans)
        else:
            assert len(std) == in_chans
        input_config['std'] = std
    elif 'std' in model_config:
        input_config['std'] = model_config['std']

    # resolve letterbox fill color
    input_config['fill_color'] = 'mean'
    if 'fill_color' in args and args['fill_color'] is not None:
        input_config['fill_color'] = args['fill_color']
    elif 'fill_color' in model_config:
        input_config['fill_color'] = model_config['fill_color']

    return input_config
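# Hedged sketch: args may be an argparse.Namespace or a plain dict; unset entries fall
# back to the model config, then to the hard defaults above.
_cfg = resolve_input_config(
    dict(interpolation='', mean=None, std=None, fill_color=None),
    model_config=dict(input_size=(3, 640, 640), interpolation='bilinear'))
assert _cfg['input_size'] == (3, 640, 640) and _cfg['interpolation'] == 'bilinear'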
class Parser:
    """ Parser base class.

    The attributes listed below make up a public interface common to all parsers. They can be accessed directly
    once the dataset is constructed and annotations are populated.

    Attributes:

        cat_names (list[str]):
            list of category (class) names, with background class at position 0.
        cat_ids (list[Union[str, int]]):
            list of dataset specific, unique integer or string category ids, does not include background
        cat_id_to_label (dict):
            map from category id to integer 1-indexed class label

        img_ids (list):
            list of dataset specific, unique image ids corresponding to valid samples in dataset
        img_ids_invalid (list):
            list of image ids corresponding to invalid images, not used as samples
        img_infos (list[dict]):
            image info, list of info dicts with filename, width, height for each image sample
    """
    def __init__(
            self,
            bbox_yxyx: bool = False,
            has_labels: bool = True,
            include_masks: bool = False,
            include_bboxes_ignore: bool = False,
            ignore_empty_gt: bool = False,
            min_img_size: int = 32,
    ):
        """
        Args:
            bbox_yxyx (bool): output coords in yxyx format, otherwise xyxy
            has_labels (bool): dataset has labels (for training / validation, False usually for test sets)
            include_masks (bool): include segmentation masks in target output (not supported yet for any dataset)
            include_bboxes_ignore (bool): include ignored bbox in target output
            ignore_empty_gt (bool): ignore images with no ground truth (no negative images)
            min_img_size (int): ignore images with width or height smaller than this number
        """
        # fields that shape parser output
        self.yxyx = bbox_yxyx
        self.has_labels = has_labels
        self.include_masks = include_masks
        self.include_bboxes_ignore = include_bboxes_ignore
        self.ignore_empty_gt = ignore_empty_gt
        self.min_img_size = min_img_size
        self.label_offset = 1

        # public metadata, populated by dataset specific parsers
        self.cat_names: List[str] = []
        self.cat_ids: List[Union[str, Integral]] = []
        self.cat_id_to_label: Dict[Union[str, Integral], Integral] = dict()
        self.img_ids: List[Union[str, Integral]] = []
        self.img_ids_invalid: List[Union[str, Integral]] = []
        self.img_infos: List[Dict[str, Any]] = []

    @property
    def cat_dicts(self):
        """return category names and labels in format compatible with TF Models Evaluator
        list[dict(name=<class name>, id=<class label>)]
        """
        return [
            dict(
                name=name,
                id=cat_id if not self.cat_id_to_label else self.cat_id_to_label[cat_id],
            ) for name, cat_id in zip(self.cat_names, self.cat_ids)]

    @property
    def max_label(self):
        if self.cat_id_to_label:
            return max(self.cat_id_to_label.values())
        else:
            assert len(self.cat_ids) and isinstance(self.cat_ids[0], Integral)
            return max(self.cat_ids)
class CocoParser(Parser):

    def __init__(self, cfg: CocoParserCfg):
        super().__init__(
            bbox_yxyx=cfg.bbox_yxyx,
            has_labels=cfg.has_labels,
            include_masks=cfg.include_masks,
            include_bboxes_ignore=cfg.include_bboxes_ignore,
            ignore_empty_gt=cfg.has_labels and cfg.ignore_empty_gt,
            min_img_size=cfg.min_img_size,
        )
        self.cat_ids_as_labels = True
        self.coco = None
        self._load_annotations(cfg.ann_filename)

    def get_ann_info(self, idx):
        img_id = self.img_ids[idx]
        return self._parse_img_ann(img_id)

    def _load_annotations(self, ann_file):
        assert self.coco is None
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.getCatIds()
        self.cat_names = [c['name'] for c in self.coco.loadCats(ids=self.cat_ids)]
        if not self.cat_ids_as_labels:
            self.cat_id_to_label = {cat_id: i + self.label_offset for i, cat_id in enumerate(self.cat_ids)}
        img_ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        for img_id in sorted(self.coco.imgs.keys()):
            info = self.coco.loadImgs([img_id])[0]
            if (min(info['width'], info['height']) < self.min_img_size or
                    (self.ignore_empty_gt and img_id not in img_ids_with_ann)):
                self.img_ids_invalid.append(img_id)
                continue
            self.img_ids.append(img_id)
            self.img_infos.append(info)

    def _parse_img_ann(self, img_id):
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        ann_info = self.coco.loadAnns(ann_ids)
        bboxes = []
        bboxes_ignore = []
        cls = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            if self.include_masks and ann['area'] <= 0:
                continue
            if w < 1 or h < 1:
                continue
            if self.yxyx:
                bbox = [y1, x1, y1 + h, x1 + w]
            else:
                bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                if self.include_bboxes_ignore:
                    bboxes_ignore.append(bbox)
            else:
                bboxes.append(bbox)
                cls.append(self.cat_id_to_label[ann['category_id']] if self.cat_id_to_label else ann['category_id'])

        if bboxes:
            bboxes = np.array(bboxes, ndmin=2, dtype=np.float32)
            cls = np.array(cls, dtype=np.int64)
        else:
            bboxes = np.zeros((0, 4), dtype=np.float32)
            cls = np.array([], dtype=np.int64)

        if self.include_bboxes_ignore:
            if bboxes_ignore:
                bboxes_ignore = np.array(bboxes_ignore, ndmin=2, dtype=np.float32)
            else:
                bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        ann = dict(bbox=bboxes, cls=cls)
        if self.include_bboxes_ignore:
            ann['bbox_ignore'] = bboxes_ignore
        return ann
@dataclass
class CocoParserCfg:
    ann_filename: str
    include_masks: bool = False
    include_bboxes_ignore: bool = False
    has_labels: bool = True
    bbox_yxyx: bool = True
    min_img_size: int = 32
    ignore_empty_gt: bool = False
@dataclass
class VocParserCfg:
    split_filename: str
    ann_filename: str
    img_filename: str = '%s.jpg'
    keep_difficult: bool = True
    classes: list = None
    add_background: bool = True
    has_labels: bool = True
    bbox_yxyx: bool = True
    min_img_size: int = 32
    ignore_empty_gt: bool = False
@dataclass
class OpenImagesParserCfg:
    categories_filename: str
    img_info_filename: str
    bbox_filename: str
    img_label_filename: str = ''
    masks_filename: str = ''
    img_filename: str = '%s.jpg'
    task: str = 'obj'
    prefix_levels: int = 1
    add_background: bool = True
    has_labels: bool = True
    bbox_yxyx: bool = True
    min_img_size: int = 32
    ignore_empty_gt: bool = False
def create_parser(name, **kwargs):
    if name == 'coco':
        parser = CocoParser(**kwargs)
    elif name == 'voc':
        parser = VocParser(**kwargs)
    elif name == 'openimages':
        parser = OpenImagesParser(**kwargs)
    else:
        assert False, f'Unknown dataset parser ({name})'
    return parser
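# Hedged sketch: parsers are built from their cfg dataclasses via create_parser.
# The annotation path here is hypothetical; the function is illustrative only.
def _example_build_coco_parser(ann_file='/data/coco/annotations/instances_val2017.json'):
    cfg = CocoParserCfg(ann_filename=ann_file)
    parser = create_parser('coco', cfg=cfg)
    return parser.max_label, len(parser.img_ids)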
class VocParser(Parser):

    DEFAULT_CLASSES = (
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
        'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
        'train', 'tvmonitor')

    def __init__(self, cfg: VocParserCfg):
        super().__init__(
            bbox_yxyx=cfg.bbox_yxyx,
            has_labels=cfg.has_labels,
            include_masks=False,
            include_bboxes_ignore=False,
            ignore_empty_gt=cfg.has_labels and cfg.ignore_empty_gt,
            min_img_size=cfg.min_img_size,
        )
        self.correct_bbox = 1
        self.keep_difficult = cfg.keep_difficult
        self.anns = None
        self.img_id_to_idx = {}
        self._load_annotations(
            split_filename=cfg.split_filename,
            img_filename=cfg.img_filename,
            ann_filename=cfg.ann_filename,
            classes=cfg.classes,
        )

    def _load_annotations(
            self,
            split_filename: str,
            img_filename: str,
            ann_filename: str,
            classes=None,
    ):
        classes = classes or self.DEFAULT_CLASSES
        self.cat_names = list(classes)
        self.cat_ids = self.cat_names
        self.cat_id_to_label = {cat: i + self.label_offset for i, cat in enumerate(self.cat_ids)}
        self.anns = []
        with open(split_filename) as f:
            ids = f.readlines()
        for img_id in ids:
            img_id = img_id.strip('\n')
            filename = img_filename % img_id
            xml_path = ann_filename % img_id
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            if min(width, height) < self.min_img_size:
                continue
            anns = []
            for obj_idx, obj in enumerate(root.findall('object')):
                name = obj.find('name').text
                label = self.cat_id_to_label[name]
                difficult = int(obj.find('difficult').text)
                bnd_box = obj.find('bndbox')
                bbox = [
                    int(bnd_box.find('xmin').text),
                    int(bnd_box.find('ymin').text),
                    int(bnd_box.find('xmax').text),
                    int(bnd_box.find('ymax').text),
                ]
                anns.append(dict(label=label, bbox=bbox, difficult=difficult))
            if not self.ignore_empty_gt or len(anns):
                self.anns.append(anns)
                self.img_infos.append(dict(id=img_id, file_name=filename, width=width, height=height))
                self.img_ids.append(img_id)
            else:
                self.img_ids_invalid.append(img_id)

    def merge(self, other):
        assert len(self.cat_ids) == len(other.cat_ids)
        self.img_ids.extend(other.img_ids)
        self.img_infos.extend(other.img_infos)
        self.anns.extend(other.anns)

    def get_ann_info(self, idx):
        return self._parse_ann_info(self.anns[idx])

    def _parse_ann_info(self, ann_info):
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for ann in ann_info:
            ignore = False
            x1, y1, x2, y2 = ann['bbox']
            label = ann['label']
            w = x2 - x1
            h = y2 - y1
            if w < 1 or h < 1:
                ignore = True
            if self.yxyx:
                bbox = [y1, x1, y2, x2]
            else:
                bbox = ann['bbox']
            if ignore or (ann['difficult'] and not self.keep_difficult):
                bboxes_ignore.append(bbox)
                labels_ignore.append(label)
            else:
                bboxes.append(bbox)
                labels.append(label)

        if not bboxes:
            bboxes = np.zeros((0, 4), dtype=np.float32)
            labels = np.zeros((0,), dtype=np.float32)
        else:
            bboxes = np.array(bboxes, ndmin=2, dtype=np.float32) - self.correct_bbox
            labels = np.array(labels, dtype=np.float32)

        if self.include_bboxes_ignore:
            if not bboxes_ignore:
                bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
                labels_ignore = np.zeros((0,), dtype=np.float32)
            else:
                bboxes_ignore = np.array(bboxes_ignore, ndmin=2, dtype=np.float32) - self.correct_bbox
                labels_ignore = np.array(labels_ignore, dtype=np.float32)

        ann = dict(bbox=bboxes.astype(np.float32), cls=labels.astype(np.int64))
        if self.include_bboxes_ignore:
            ann.update(dict(
                bbox_ignore=bboxes_ignore.astype(np.float32),
                cls_ignore=labels_ignore.astype(np.int64)))
        return ann
def get_world_size() -> int:
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()
def get_rank() -> int:
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    return dist.get_rank()
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine) process group.
    """
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group,
        i.e. the number of processes per machine.
    """
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
    return get_rank() == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    world_size = dist.get_world_size()
    if world_size == 1:
        return
    dist.barrier()
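# Hedged sketch of the usual rank-0 checkpoint pattern: only the main process writes,
# then every rank waits at the barrier (a no-op outside distributed runs). The payload
# and path arguments are hypothetical.
def _example_save_checkpoint(state, path='/tmp/checkpoint.pth'):
    if is_main_process():
        torch.save(state, path)
    synchronize()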
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the ranks
    The result is cached.
    """
    if dist.get_backend() == 'nccl':
        return dist.new_group(backend='gloo')
    else:
        return dist.group.WORLD
def _serialize_to_tensor(data, group):
    backend = dist.get_backend(group)
    assert backend in ['gloo', 'nccl']
    device = torch.device('cpu' if backend == 'gloo' else 'cuda')

    buffer = pickle.dumps(data)
    if len(buffer) > 1024 ** 3:
        logger = logging.getLogger(__name__)
        logger.warning(
            'Rank {} trying to all-gather {:.2f} GB of data on device {}'.format(
                get_rank(), len(buffer) / (1024 ** 3), device))
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to(device=device)
    return tensor
def _pad_to_largest_tensor(tensor, group):
    """
    Returns:
        list[int]: size of the tensor, on each rank
        Tensor: padded tensor that has the max size
    """
    world_size = dist.get_world_size(group=group)
    assert world_size >= 1, 'comm.gather/all_gather must be called from ranks within the given group!'
    local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
    size_list = [torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size)]
    dist.all_gather(size_list, local_size, group=group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    if local_size != max_size:
        padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device)
        tensor = torch.cat((tensor, padding), dim=0)
    return size_list, tensor
def all_gather(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.

    Returns:
        list[data]: list of data gathered from each rank
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]

    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    max_size = max(size_list)

    tensor_list = [torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list]
    dist.all_gather(tensor_list, tensor, group=group)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
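# Hedged usage sketch: gather arbitrary picklable per-rank results onto every rank.
# In a single (non-distributed) process this simply returns a one-element list.
_local_results = {'rank': get_rank(), 'num_images': 10}  # hypothetical payload
assert len(all_gather(_local_results)) == get_world_size()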
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.

    Returns:
        list[data]: on dst, a list of data gathered from each rank. Otherwise,
            an empty list.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return [data]
    rank = dist.get_rank(group=group)

    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)

    if rank == dst:
        max_size = max(size_list)
        tensor_list = [torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list]
        dist.gather(tensor, tensor_list, dst=dst, group=group)
        data_list = []
        for size, tensor in zip(size_list, tensor_list):
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        return data_list
    else:
        dist.gather(tensor, [], dst=dst, group=group)
        return []
def shared_random_seed():
    """
    Returns:
        int: a random number that is the same across all workers.
            If workers need a shared RNG, they can use this shared seed to
            create one.

    All workers must call this function, otherwise it will deadlock.
    """
    ints = np.random.randint(2 ** 31)
    all_ints = all_gather(ints)
    return all_ints[0]
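# Hedged sketch: derive one seed shared by all workers (every rank must participate).
_shared = shared_random_seed()
_rng = np.random.RandomState(_shared)  # identical stream on every rank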
def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the reduced results.

    Args:
        input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
        average (bool): whether to do average or sum

    Returns:
        a dict with the same keys as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only rank 0 holds the accumulated values, so only divide there
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
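# Hedged sketch: average scalar loss tensors across ranks for rank-0 logging. In a
# single-process run reduce_dict is a no-op and returns the dict unchanged.
_losses = {'loss_cls': torch.tensor(0.5), 'loss_box': torch.tensor(0.25)}
_reduced = reduce_dict(_losses, average=True)
if is_main_process():
    print({k: float(v) for k, v in _reduced.items()})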
def all_gather_container(container, group=None, cat_dim=0):
    group = group or dist.group.WORLD
    world_size = dist.get_world_size(group)

    def _do_gather(tensor):
        tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
        dist.all_gather(tensor_list, tensor, group=group)
        return torch.cat(tensor_list, dim=cat_dim)

    if isinstance(container, dict):
        gathered = dict()
        for k, v in container.items():
            v = _do_gather(v)
            gathered[k] = v
        return gathered
    elif isinstance(container, (list, tuple)):
        gathered = [_do_gather(v) for v in container]
        if isinstance(container, tuple):
            gathered = tuple(gathered)
        return gathered
    else:
        assert isinstance(container, torch.Tensor)
        return _do_gather(container)
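# Hedged sketch: concatenate per-rank prediction tensors along the batch dimension.
# Unlike all_gather above, this uses dist.all_gather on the tensors directly, so the
# process group must be initialized and shapes must match across ranks.
def _example_gather_predictions(boxes, scores):
    preds = dict(boxes=boxes, scores=scores)  # e.g. boxes (N, 4), scores (N,)
    return all_gather_container(preds)  # each value becomes (N * world_size, ...)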
def gather_container(container, dst, group=None, cat_dim=0):
    group = group or dist.group.WORLD
    world_size = dist.get_world_size(group)
    this_rank = dist.get_rank(group)

    def _do_gather(tensor):
        if this_rank == dst:
            tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
        else:
            tensor_list = None
        dist.gather(tensor, tensor_list, dst=dst, group=group)
        if this_rank != dst:
            # only the destination rank receives the gathered tensors
            return None
        return torch.cat(tensor_list, dim=cat_dim)

    if isinstance(container, dict):
        gathered = dict()
        for k, v in container.items():
            v = _do_gather(v)
            gathered[k] = v
        return gathered
    elif isinstance(container, (list, tuple)):
        gathered = [_do_gather(v) for v in container]
        if isinstance(container, tuple):
            gathered = tuple(gathered)
        return gathered
    else:
        assert isinstance(container, torch.Tensor)
        return _do_gather(container)
class SequentialList(nn.Sequential):
    """ This module exists to work around torchscript typing issues list -> list"""

    def __init__(self, *args):
        super(SequentialList, self).__init__(*args)

    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        for module in self:
            x = module(x)
        return x
class ConvBnAct2d(nn.Module):
    def __init__(
            self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding='',
            bias=False, norm_layer=nn.BatchNorm2d, act_layer=_ACT_LAYER):
        super(ConvBnAct2d, self).__init__()
        self.conv = create_conv2d(
            in_channels, out_channels, kernel_size,
            stride=stride, dilation=dilation, padding=padding, bias=bias)
        self.bn = None if norm_layer is None else norm_layer(out_channels)
        self.act = None if act_layer is None else act_layer(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x
class SeparableConv2d(nn.Module):
    """ Separable Conv
    """
    def __init__(
            self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='',
            bias=False, channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d,
            act_layer=_ACT_LAYER):
        super(SeparableConv2d, self).__init__()
        self.conv_dw = create_conv2d(
            in_channels, int(in_channels * channel_multiplier), kernel_size,
            stride=stride, dilation=dilation, padding=padding, depthwise=True)
        self.conv_pw = create_conv2d(
            int(in_channels * channel_multiplier), out_channels, pw_kernel_size,
            padding=padding, bias=bias)
        self.bn = None if norm_layer is None else norm_layer(out_channels)
        self.act = None if act_layer is None else act_layer(inplace=True)

    def forward(self, x):
        x = self.conv_dw(x)
        x = self.conv_pw(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x
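# Hedged shape check (assumes the timm-style create_conv2d with 'same' padding backing
# these layers): a stride-2 separable conv halves spatial dims and remaps channels.
_sep = SeparableConv2d(32, 64, kernel_size=3, stride=2, padding='same')
assert _sep(torch.randn(1, 32, 64, 64)).shape == (1, 64, 32, 32)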
class Interpolate2d(nn.Module):
    """Resamples a 2d Image

    The input data is assumed to be of the form
    `minibatch x channels x [optional depth] x [optional height] x width`.
    Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.

    The algorithms available for upsampling are nearest neighbor and linear,
    bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor,
    respectively.

    One can either give a :attr:`scale_factor` or the target output :attr:`size` to
    calculate the output size. (You cannot give both, as it is ambiguous)

    Args:
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
            output spatial sizes
        scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
            multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
            ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
            Default: ``'nearest'``
        align_corners (bool, optional): if ``True``, the corner pixels of the input
            and output tensors are aligned, and thus preserving the values at
            those pixels. This only has effect when :attr:`mode` is
            ``'linear'``, ``'bilinear'``, or ``'trilinear'``. Default: ``False``
    """
    __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name']
    name: str
    size: Optional[Union[int, Tuple[int, int]]]
    scale_factor: Optional[Union[float, Tuple[float, float]]]
    mode: str
    align_corners: Optional[bool]

    def __init__(
            self,
            size: Optional[Union[int, Tuple[int, int]]] = None,
            scale_factor: Optional[Union[float, Tuple[float, float]]] = None,
            mode: str = 'nearest',
            align_corners: bool = False,
    ) -> None:
        super(Interpolate2d, self).__init__()
        self.name = type(self).__name__
        self.size = size
        if isinstance(scale_factor, tuple):
            self.scale_factor = tuple(float(factor) for factor in scale_factor)
        else:
            self.scale_factor = float(scale_factor) if scale_factor else None
        self.mode = mode
        self.align_corners = None if mode == 'nearest' else align_corners

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.interpolate(
            input, self.size, self.scale_factor, self.mode, self.align_corners,
            recompute_scale_factor=False)
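# Hedged sketch: Interpolate2d is a thin module wrapper over F.interpolate, convenient
# inside nn.Sequential graphs where the functional form cannot be registered as a child.
_up = Interpolate2d(scale_factor=2.0, mode='nearest')
assert _up(torch.randn(1, 3, 32, 32)).shape == (1, 3, 64, 64)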
class ResampleFeatureMap(nn.Sequential):

    def __init__(
            self, in_channels, out_channels, input_size, output_size, pad_type='',
            downsample=None, upsample=None, norm_layer=nn.BatchNorm2d, apply_bn=False,
            redundant_bias=False):
        super(ResampleFeatureMap, self).__init__()
        downsample = downsample or 'max'
        upsample = upsample or 'nearest'
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.input_size = input_size
        self.output_size = output_size

        if in_channels != out_channels:
            self.add_module('conv', ConvBnAct2d(
                in_channels, out_channels, kernel_size=1, padding=pad_type,
                norm_layer=norm_layer if apply_bn else None,
                bias=not apply_bn or redundant_bias, act_layer=None))

        if input_size[0] > output_size[0] and input_size[1] > output_size[1]:
            if downsample in ('max', 'avg'):
                stride_size_h = int((input_size[0] - 1) // output_size[0] + 1)
                stride_size_w = int((input_size[1] - 1) // output_size[1] + 1)
                if stride_size_h == stride_size_w:
                    kernel_size = stride_size_h + 1
                    stride = stride_size_h
                else:
                    kernel_size = (stride_size_h + 1, stride_size_w + 1)
                    stride = (stride_size_h, stride_size_w)
                down_inst = create_pool2d(downsample, kernel_size=kernel_size, stride=stride, padding=pad_type)
            elif _USE_SCALE:
                scale = (output_size[0] / input_size[0], output_size[1] / input_size[1])
                down_inst = Interpolate2d(scale_factor=scale, mode=downsample)
            else:
                down_inst = Interpolate2d(size=output_size, mode=downsample)
            self.add_module('downsample', down_inst)
        elif input_size[0] < output_size[0] or input_size[1] < output_size[1]:
            if _USE_SCALE:
                scale = (output_size[0] / input_size[0], output_size[1] / input_size[1])
                self.add_module('upsample', Interpolate2d(scale_factor=scale, mode=upsample))
            else:
                self.add_module('upsample', Interpolate2d(size=output_size, mode=upsample))
class FpnCombine(nn.Module):
    def __init__(
            self, feature_info, fpn_channels, inputs_offsets, output_size, pad_type='',
            downsample=None, upsample=None, norm_layer=nn.BatchNorm2d, apply_resample_bn=False,
            redundant_bias=False, weight_method='attn'):
        super(FpnCombine, self).__init__()
        self.inputs_offsets = inputs_offsets
        self.weight_method = weight_method

        self.resample = nn.ModuleDict()
        for idx, offset in enumerate(inputs_offsets):
            self.resample[str(offset)] = ResampleFeatureMap(
                feature_info[offset]['num_chs'], fpn_channels,
                input_size=feature_info[offset]['size'], output_size=output_size,
                pad_type=pad_type, downsample=downsample, upsample=upsample,
                norm_layer=norm_layer, apply_bn=apply_resample_bn, redundant_bias=redundant_bias)

        if weight_method == 'attn' or weight_method == 'fastattn':
            self.edge_weights = nn.Parameter(torch.ones(len(inputs_offsets)), requires_grad=True)
        else:
            self.edge_weights = None

    def forward(self, x: List[torch.Tensor]):
        dtype = x[0].dtype
        nodes = []
        for offset, resample in zip(self.inputs_offsets, self.resample.values()):
            input_node = x[offset]
            input_node = resample(input_node)
            nodes.append(input_node)

        if self.weight_method == 'attn':
            normalized_weights = torch.softmax(self.edge_weights.to(dtype=dtype), dim=0)
            out = torch.stack(nodes, dim=-1) * normalized_weights
        elif self.weight_method == 'fastattn':
            edge_weights = nn.functional.relu(self.edge_weights.to(dtype=dtype))
            weights_sum = torch.sum(edge_weights)
            out = torch.stack(
                [(nodes[i] * edge_weights[i]) / (weights_sum + 0.0001) for i in range(len(nodes))], dim=-1)
        elif self.weight_method == 'sum':
            out = torch.stack(nodes, dim=-1)
        else:
            raise ValueError('unknown weight_method {}'.format(self.weight_method))
        out = torch.sum(out, dim=-1)
        return out
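# Hedged wiring sketch: fuse two feature maps of different resolutions into a 16x16
# node with 'fastattn' weighting. feature_info entries follow the {'num_chs', 'size'}
# convention used by ResampleFeatureMap above; the channel/size values are made up.
_feature_info = [dict(num_chs=40, size=(32, 32)), dict(num_chs=112, size=(16, 16))]
_combine = FpnCombine(
    _feature_info, fpn_channels=64, inputs_offsets=(0, 1), output_size=(16, 16),
    weight_method='fastattn')
_fused = _combine([torch.randn(1, 40, 32, 32), torch.randn(1, 112, 16, 16)])
assert _fused.shape == (1, 64, 16, 16)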