code
stringlengths
17
6.64M
def add_weight_decay(weight_decay: float, filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
    """Adds a weight decay to the update.

    Args:
      weight_decay: weight decay coefficient.
      filter_fn: an optional filter function selecting which params receive
        the decay (see `_partial_update`).

    Returns:
      An optax.GradientTransformation, i.e. an (init_fn, update_fn) tuple.
    """

    def init_fn(_) -> AddWeightDecayState:
        return AddWeightDecayState()

    def update_fn(updates: optax.Updates,
                  state: AddWeightDecayState,
                  params: optax.Params) -> Tuple[optax.Updates, AddWeightDecayState]:
        # BUG FIX: jax.tree_multimap was deprecated and has been removed from
        # JAX; jax.tree_util.tree_map is the supported equivalent.
        new_updates = jax.tree_util.tree_map(
            lambda g, p: g + weight_decay * p, updates, params)
        # Only apply the decayed update where filter_fn allows it.
        new_updates = _partial_update(updates, new_updates, params, filter_fn)
        return new_updates, state

    return optax.GradientTransformation(init_fn, update_fn)
def lars(learning_rate: ScalarOrSchedule,
         weight_decay: float = 0.0,
         momentum: float = 0.9,
         eta: float = 0.001,
         weight_decay_filter: Optional[FilterFn] = None,
         lars_adaptation_filter: Optional[FilterFn] = None) -> optax.GradientTransformation:
    """Creates a LARS optimizer with weight decay.

    References:
      [You et al, 2017](https://arxiv.org/abs/1708.03888)

    Args:
      learning_rate: learning rate coefficient.
      weight_decay: weight decay coefficient.
      momentum: momentum coefficient.
      eta: LARS coefficient.
      weight_decay_filter: optional filter deciding which params get weight
        decay. It receives the parameter path (a tuple) and its update, and
        returns True to apply the decay. When None, the decay is skipped for
        biases (variable name 'b') and normalization params (penultimate path
        contains 'norm').
      lars_adaptation_filter: same contract as weight_decay_filter, but for
        the LARS adaptation step.

    Returns:
      An optax.GradientTransformation, i.e. an (init_fn, update_fn) tuple.
    """
    if weight_decay_filter is None:
        weight_decay_filter = exclude_bias_and_norm
    if lars_adaptation_filter is None:
        lars_adaptation_filter = exclude_bias_and_norm

    return optax.chain(
        add_weight_decay(weight_decay=weight_decay, filter_fn=weight_decay_filter),
        scale_by_lars(momentum=momentum, eta=eta, filter_fn=lars_adaptation_filter),
        scale_by_learning_rate(learning_rate),
    )
def _rename(kwargs, originals, new): for (o, n) in zip(originals, new): o = kwargs.pop(o, None) if (o is not None): kwargs[n] = o
def _erase(kwargs, names): for u in names: kwargs.pop(u, None)
def create_optax_optim(name, learning_rate=None, momentum=0.9, weight_decay=0, **kwargs):
    """Optimizer Factory.

    Args:
      name: optimizer name, e.g. 'sgd', 'momentum', 'nesterov', 'adabelief',
        'adam', 'adamw', 'lamb', 'lars', 'rmsprop', 'rmsproptf'.
      learning_rate (float): specify learning rate or leave up to scheduler / optim if None
      momentum (float): momentum coefficient for sgd/rmsprop-style optimizers.
      weight_decay (float): weight decay to apply to all params, not applied if 0
      **kwargs: optional / optimizer specific params that override defaults

    With regards to the kwargs, the param naming incoming from config files is
    kept consistent so there is less variation. Names of common args such as
    eps, beta1, beta2 will be remapped where possible (even if the optimizer
    impl uses a different name) and removed when not needed. Common params:
      eps (float): default stability / regularization epsilon value
      beta1 (float): moving average / momentum coefficient for gradient
      beta2 (float): moving average / momentum coefficient for gradient magnitude (squared grad)

    Returns:
      An optax optimizer (GradientTransformation).

    Raises:
      AssertionError: if `name` is not a known optimizer.
    """
    name = name.lower()
    opt_args = dict(learning_rate=learning_rate, **kwargs)
    _rename(opt_args, ('beta1', 'beta2'), ('b1', 'b2'))
    if name in ('sgd', 'momentum', 'nesterov'):
        _erase(opt_args, ('eps',))  # plain SGD variants take no eps
        if name == 'momentum':
            optimizer = optax.sgd(momentum=momentum, **opt_args)
        elif name == 'nesterov':
            # BUG FIX: **opt_args (incl. learning_rate) was previously dropped
            # for the nesterov variant.
            optimizer = optax.sgd(momentum=momentum, nesterov=True, **opt_args)
        else:
            assert name == 'sgd'
            optimizer = optax.sgd(momentum=0, **opt_args)
    elif name == 'adabelief':
        optimizer = optax.adabelief(**opt_args)
    elif name in ('adam', 'adamw'):
        if name == 'adamw':
            optimizer = optax.adamw(weight_decay=weight_decay, **opt_args)
        else:
            optimizer = optax.adam(**opt_args)
    elif name == 'lamb':
        optimizer = optax.lamb(weight_decay=weight_decay, **opt_args)
    elif name == 'lars':
        optimizer = lars(weight_decay=weight_decay, **opt_args)
    elif name == 'rmsprop':
        optimizer = optax.rmsprop(momentum=momentum, **opt_args)
    elif name == 'rmsproptf':
        # TF-style rmsprop initializes the accumulator at 1 instead of 0.
        optimizer = optax.rmsprop(momentum=momentum, initial_scale=1.0, **opt_args)
    else:
        assert False, f'Invalid optimizer name specified ({name})'
    return optimizer
def get_like_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
    """Return symmetric 'SAME'-like padding for a conv with the given geometry."""
    effective_extent = dilation * (kernel_size - 1)
    return (stride - 1 + effective_extent) // 2
def _randomly_negate_tensor(tensor):
    """With 50% prob turn the tensor negative."""
    # floor(uniform[0,1) + 0.5) is 0 or 1 with equal probability.
    should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
    return tf.cond(should_flip, lambda: tensor, lambda: -tensor)
def _rotate_level(level):
    """Map level to a rotation magnitude in [0, 30] with random sign."""
    angle = (level / _MAX_LEVEL) * 30.0
    return (_randomly_negate_tensor(angle),)
def _shrink_level(level):
    """Converts level to ratio by which we shrink the image content."""
    if level == 0:
        return (1.0,)  # level 0 means no shrinking
    ratio = 2.0 / (_MAX_LEVEL / level) + 0.9
    return (ratio,)
def _enhance_level(level):
    """Map level to an enhancement factor around 1.0, clipped to [0, 3]."""
    delta = (level / _MAX_LEVEL) * 0.9
    factor = 1.0 + _randomly_negate_tensor(delta)
    return (tf.clip_by_value(factor, 0.0, 3.0),)
def _shear_level(level):
    """Map level to a shear magnitude in [0, 0.3] with random sign."""
    shear = (level / _MAX_LEVEL) * 0.3
    return (_randomly_negate_tensor(shear),)
def _translate_level(level, translate_const):
    """Map level to a translation in [0, translate_const] with random sign."""
    pixels = (level / _MAX_LEVEL) * float(translate_const)
    return (_randomly_negate_tensor(pixels),)
def _get_args_fn(hparams):
    """Return a map from augmentation-op name to a function of `level` that
    produces that op's extra (non-image) arguments.

    NOTE(review): several entries read hparams['fill_value'] lazily, so
    callers that do not provide it fail only when such an op is selected —
    confirm against the hparams built by each caller.
    """
    def no_args(level):
        return ()

    def with_fill(level_fn):
        # Append the fill value to the args produced by level_fn; hparams is
        # looked up lazily on purpose, matching the original closures.
        return lambda level: level_fn(level) + (hparams['fill_value'],)

    translate = lambda level: _translate_level(level, hparams['translate_const'])
    return {
        'AutoContrast': no_args,
        'Equalize': no_args,
        'Invert': no_args,
        'Rotate': with_fill(_rotate_level),
        'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4),),
        'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256),),
        'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110),),
        'Color': _enhance_level,
        'Contrast': _enhance_level,
        'Brightness': _enhance_level,
        'Sharpness': _enhance_level,
        'ShearX': with_fill(_shear_level),
        'ShearY': with_fill(_shear_level),
        'TranslateX': with_fill(translate),
        'TranslateY': with_fill(translate),
        'Cutout': no_args,
    }
class RandAugment():
    """Random augment with fixed magnitude.

    FIXME this is a class based impl or RA from fixmatch, it needs some
    changes before using
    """

    def __init__(self, num_layers=2, prob_to_apply=None, magnitude=None, num_levels=10):
        """Initialized rand augment.

        Args:
          num_layers: number of augmentation layers, i.e. how many times to do
            augmentation.
          prob_to_apply: probability to apply on each layer. If None then
            always apply.
          magnitude: default magnitude in range [0, 1], if None then magnitude
            will be chosen randomly.
          num_levels: number of levels for quantization of the magnitude.
        """
        self.num_layers = num_layers
        self.prob_to_apply = float(prob_to_apply) if prob_to_apply is not None else None
        self.num_levels = int(num_levels) if num_levels else None
        self.level = float(magnitude) if magnitude is not None else None
        # NOTE(review): no 'fill_value' here, while the _get_args_fn closures
        # for Rotate/Shear/Translate read one — confirm before enabling those.
        self.augmentation_hparams = dict(translate_rel=0.4, translate_const=100)

    def _get_level(self):
        """Return the fixed magnitude, or sample one in [0, 1]."""
        if self.level is not None:
            return tf.convert_to_tensor(self.level)
        if self.num_levels is None:
            return tf.random.uniform(shape=[], dtype=tf.float32)
        quantized = tf.random.uniform(shape=[], maxval=self.num_levels + 1, dtype=tf.int32)
        return tf.cast(quantized, tf.float32) / self.num_levels

    def _apply_one_layer(self, image):
        """Applies one level of augmentation to the image."""
        level = self._get_level()
        branch_fns = []
        for augment_op_name in IMAGENET_AUG_OPS:
            augment_fn = NAME_TO_FUNC[augment_op_name]
            args_fn = _get_args_fn(self.augmentation_hparams)[augment_op_name]

            # Bind loop variables as defaults to avoid late-binding closures.
            def _branch_fn(image=image, augment_fn=augment_fn, args_fn=args_fn):
                return augment_fn(image, *args_fn(level))

            branch_fns.append(_branch_fn)

        branch_index = tf.random.uniform(shape=[], maxval=len(branch_fns), dtype=tf.int32)
        aug_image = tf.switch_case(branch_index, branch_fns, default=lambda: image)
        if self.prob_to_apply is None:
            return aug_image
        return tf.cond(
            tf.random.uniform(shape=[], dtype=tf.float32) < self.prob_to_apply,
            lambda: aug_image,
            lambda: image)

    def __call__(self, image, aug_image_key='image'):
        output_dict = {}
        if aug_image_key is not None:
            aug_image = image
            for _ in range(self.num_layers):
                aug_image = self._apply_one_layer(aug_image)
            output_dict[aug_image_key] = aug_image
        if aug_image_key != 'image':
            # Keep the unaugmented image available under the default key.
            output_dict['image'] = image
        return output_dict
def _parse_policy_info(name, prob, level, augmentation_hparams):
    """Return the function that corresponds to `name` and update `level` param."""
    op_fn = NAME_TO_FUNC[name]
    op_args = _get_args_fn(augmentation_hparams)[name](level)
    return op_fn, prob, op_args
def _apply_func_with_prob(func, image, args, prob):
    """Apply `func` to image w/ `args` as input with probability `prob`."""
    assert isinstance(args, tuple)
    # floor(uniform[0,1) + prob) is 1 with probability prob, else 0.
    should_apply_op = tf.cast(
        tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
    return tf.cond(should_apply_op, lambda: func(image, *args), lambda: image)
def select_and_apply_random_policy(policies, image):
    """Select a random policy from `policies` and apply it to `image`."""
    policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32)
    for idx, policy in enumerate(policies):
        image = tf.cond(
            tf.equal(idx, policy_to_select),
            lambda selected_policy=policy: selected_policy(image),
            lambda: image)
    return image
def distort_image_with_randaugment(image, num_layers, magnitude, fill_value=(128, 128, 128)):
    """Applies the RandAugment policy to `image`.

    RandAugment is from the paper https://arxiv.org/abs/1909.13719.

    Args:
      image: `Tensor` of shape [height, width, 3] representing an image.
      num_layers: Integer, the number of augmentation transformations to apply
        sequentially to an image. Represented as (N) in the paper. Usually
        best values will be in the range [1, 3].
      magnitude: Integer, shared magnitude across all augmentation operations.
        Represented as (M) in the paper. Usually best values are in the range
        [5, 30].
      fill_value: RGB fill used by the geometric ops.

    Returns:
      The augmented version of `image`.
    """
    hparams = dict(translate_rel=0.4, translate_const=100, fill_value=fill_value)
    available_ops = [
        'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize',
        'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY',
        'TranslateX', 'TranslateY', 'SolarizeAdd']
    for layer_num in range(num_layers):
        op_to_select = tf.random.uniform([], maxval=len(available_ops), dtype=tf.int32)
        random_magnitude = float(magnitude)
        with tf.name_scope('randaug_layer_{}'.format(layer_num)):
            for i, op_name in enumerate(available_ops):
                prob = tf.random.uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
                func, _, args = _parse_policy_info(op_name, prob, random_magnitude, hparams)
                # Only the randomly selected op actually transforms the image.
                image = tf.cond(
                    tf.equal(i, op_to_select),
                    lambda selected_func=func, selected_args=args:
                        selected_func(image, *selected_args),
                    lambda: image)
    return image
class Split(enum.Enum):
    """Imagenet dataset split."""
    TRAIN = 1
    TEST = 2

    @property
    def num_examples(self):
        # Standard ImageNet-1k counts: train and validation ("test" here).
        counts = {Split.TRAIN: 1281167, Split.TEST: 50000}
        return counts[self]
def _to_tfds_split(split: Split) -> tfds.Split:
    """Returns the TFDS split appropriately sharded."""
    if split == Split.TRAIN:
        return tfds.Split.TRAIN
    # Only two splits exist; TEST maps onto the TFDS validation split.
    assert split == Split.TEST
    return tfds.Split.VALIDATION
def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[(int, int)]: 'Returns [start, end) for the given shard index.' assert (shard_index < num_shards) arange = np.arange(split.num_examples) shard_range = np.array_split(arange, num_shards)[shard_index] (start, end) = (shard_range[0], (shard_range[(- 1)] + 1)) return (start, end)
def load(split: Split,
         is_training: bool,
         batch_dims: Sequence[int],
         image_size: int = IMAGE_SIZE,
         chw: bool = False,
         dataset_name='imagenet2012:5.0.0',
         mean: Optional[Tuple[float]] = None,
         std: Optional[Tuple[float]] = None,
         interpolation: str = 'bicubic',
         tfds_data_dir: Optional[str] = None):
    """Loads the given split of the dataset.

    BUG FIX: this docstring was previously placed after the first statements,
    making it a no-op string expression instead of the function docstring.

    Args:
      split: which Split to read.
      is_training: if True, shard per host, repeat and shuffle.
      batch_dims: batch dimensions, applied innermost-first.
      image_size: target image size.
      chw: if True, transpose images from HWC to CHW.
      dataset_name: TFDS dataset name/version.
      mean: per-channel normalization mean; defaults to MEAN_RGB.
      std: per-channel normalization std; defaults to STDDEV_RGB.
      interpolation: 'bilinear' or 'bicubic' resize.
      tfds_data_dir: optional TFDS data directory.

    Returns:
      (numpy iterator over batches, number of batches per pass).
    """
    mean = MEAN_RGB if mean is None else mean
    std = STDDEV_RGB if std is None else std
    if is_training:
        start, end = _shard(split, jax.host_id(), jax.host_count())
    else:
        start, end = _shard(split, 0, 1)
    tfds_split = tfds.core.ReadInstruction(
        _to_tfds_split(split), from_=start, to=end, unit='abs')
    ds = tfds.load(dataset_name, split=tfds_split,
                   decoders={'image': tfds.decode.SkipDecoding()},
                   data_dir=tfds_data_dir)
    total_batch_size = np.prod(batch_dims)
    # BUG FIX: mutating the object returned by Dataset.options() is not
    # guaranteed to take effect in TF2; apply options via with_options().
    options = tf.data.Options()
    options.experimental_threading.private_threadpool_size = 16
    options.experimental_threading.max_intra_op_parallelism = 1
    if is_training:
        options.experimental_deterministic = False
    ds = ds.with_options(options)
    if is_training:
        ds = ds.repeat()
        ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
    elif (split.num_examples % total_batch_size) != 0:
        raise ValueError(f'Test set size must be divisible by {total_batch_size}')
    num_batches = split.num_examples // total_batch_size
    interpolation = (tf.image.ResizeMethod.BILINEAR if 'bilinear' in interpolation
                     else tf.image.ResizeMethod.BICUBIC)

    def preprocess(example):
        image = _preprocess_image(example['image'], is_training,
                                  image_size=image_size, mean=mean, std=std,
                                  interpolation=interpolation)
        if chw:
            # HWC -> CHW for frameworks expecting channels-first.
            image = tf.transpose(image, (2, 0, 1))
        label = tf.cast(example['label'], tf.int32)
        return {'images': image, 'labels': label}

    ds = ds.map(preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    for batch_size in reversed(batch_dims):
        ds = ds.batch(batch_size)
    ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
    return tfds.as_numpy(ds), num_batches
def normalize_image_for_view(image, mean=None, std=None):
    """Invert dataset normalization on a CHW image for viewing as HWC uint8.

    Args:
      image: float array of shape (3, H, W), normalized as (x - mean) / std.
      mean: per-channel mean; defaults to MEAN_RGB.
      std: per-channel std; defaults to STDDEV_RGB.

    Returns:
      uint8 array of shape (H, W, 3) with values in [0, 255].
    """
    mean = MEAN_RGB if mean is None else mean
    std = STDDEV_RGB if std is None else std
    # BUG FIX: to invert (x - mean) / std we must multiply by std and add the
    # mean; the previous code multiplied by the mean and added the std.
    # Also avoid in-place ops so the caller's array is not mutated.
    image = image * np.reshape(std, (3, 1, 1))
    image = image + np.reshape(mean, (3, 1, 1))
    image = np.transpose(image, (1, 2, 0))
    return image.clip(0, 255).round().astype('uint8')
def _preprocess_image(image_bytes: tf.Tensor,
                      is_training: bool,
                      image_size: int = IMAGE_SIZE,
                      mean=MEAN_RGB,
                      std=STDDEV_RGB,
                      interpolation=tf.image.ResizeMethod.BICUBIC) -> tf.Tensor:
    """Returns processed and resized images."""
    if is_training:
        image = _decode_and_random_crop(image_bytes, image_size=image_size)
        image = tf.image.random_flip_left_right(image)
    else:
        image = _decode_and_center_crop(image_bytes, image_size=image_size)
    # Crop helpers decode JPEG directly, so the dtype must still be uint8.
    assert image.dtype == tf.uint8
    image = tf.image.resize(image, [image_size, image_size], interpolation)
    return _normalize_image(image, mean=mean, std=std)
def _normalize_image(image: tf.Tensor, mean=MEAN_RGB, std=STDDEV_RGB) -> tf.Tensor:
    """Normalize the image to zero mean and unit variance."""
    offset = tf.constant(mean, shape=[1, 1, 3], dtype=image.dtype)
    scale = tf.constant(std, shape=[1, 1, 3], dtype=image.dtype)
    return (image - offset) / scale
def _distorted_bounding_box_crop(image_bytes: tf.Tensor,
                                 jpeg_shape: tf.Tensor,
                                 bbox: tf.Tensor,
                                 min_object_covered: float,
                                 aspect_ratio_range: Tuple[float, float],
                                 area_range: Tuple[float, float],
                                 max_attempts: int) -> tf.Tensor:
    """Generates cropped_image using one of the bboxes randomly distorted."""
    bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
        jpeg_shape,
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    offset_y, offset_x, _ = tf.unstack(bbox_begin)
    target_height, target_width, _ = tf.unstack(bbox_size)
    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
    # Decode only the cropped window instead of the whole JPEG.
    return tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
def _decode_and_random_crop(image_bytes: tf.Tensor, image_size: int = 224) -> tf.Tensor:
    """Make a random crop of image."""
    jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
    image = _distorted_bounding_box_crop(
        image_bytes,
        jpeg_shape=jpeg_shape,
        bbox=bbox,
        min_object_covered=0.1,
        aspect_ratio_range=(3 / 4, 4 / 3),
        area_range=(0.08, 1.0),
        max_attempts=10)
    if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
        # Random crop degenerated to the full image — fall back to center crop.
        image = _decode_and_center_crop(image_bytes, image_size=image_size, jpeg_shape=jpeg_shape)
    return image
def _decode_and_center_crop(image_bytes: tf.Tensor,
                            image_size: int = 224,
                            jpeg_shape: Optional[tf.Tensor] = None) -> tf.Tensor:
    """Crops to center of image with padding then scales."""
    if jpeg_shape is None:
        jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
    image_height = jpeg_shape[0]
    image_width = jpeg_shape[1]
    crop_fraction = tf.cast(image_size / (image_size + IMAGE_PADDING_FOR_CROP), tf.float32)
    shorter_side = tf.cast(tf.minimum(image_height, image_width), tf.float32)
    padded_center_crop_size = tf.cast(crop_fraction * shorter_side, tf.int32)
    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
    crop_window = tf.stack(
        [offset_height, offset_width, padded_center_crop_size, padded_center_crop_size])
    return tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
def distorted_bounding_box_crop(image_bytes, bbox, min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0), max_attempts=100):
    """Generates cropped_image using one of the bboxes randomly distorted.

    See `tf.image.sample_distorted_bounding_box` for more documentation.

    Args:
      image_bytes: `Tensor` of binary image data.
      bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where
        each coordinate is [0, 1) and arranged as `[ymin, xmin, ymax, xmax]`.
        If num_boxes is 0, the whole image is used.
      min_object_covered: minimum fraction of any supplied bounding box the
        cropped area must contain.
      aspect_ratio_range: allowed width/height range of the crop.
      area_range: allowed fraction of the image the crop may cover.
      max_attempts: attempts at generating a valid crop before falling back to
        the entire image.

    Returns:
      cropped image `Tensor`
    """
    shape = tf.io.extract_jpeg_shape(image_bytes)
    bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
        shape,
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    offset_y, offset_x, _ = tf.unstack(bbox_begin)
    target_height, target_width, _ = tf.unstack(bbox_size)
    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
    return tf.io.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
def resize(image, image_size, interpolation=tf.image.ResizeMethod.BICUBIC, antialias=True):
    """Resize `image` to image_size x image_size with the given method."""
    # Resize expects a batch; wrap and unwrap a singleton batch dimension.
    batched = tf.image.resize(
        [image], [image_size, image_size], method=interpolation, antialias=antialias)
    return batched[0]
def at_least_x_are_equal(a, b, x):
    """At least `x` of `a` and `b` `Tensors` are equal."""
    matches = tf.cast(tf.equal(a, b), tf.int32)
    return tf.greater_equal(tf.reduce_sum(matches), x)
def decode_and_random_crop(image_bytes, image_size, interpolation):
    """Make a random crop of image_size."""
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
    image = distorted_bounding_box_crop(
        image_bytes,
        bbox,
        min_object_covered=0.1,
        aspect_ratio_range=(3.0 / 4, 4.0 / 3.0),
        area_range=(0.08, 1.0),
        max_attempts=10)
    original_shape = tf.io.extract_jpeg_shape(image_bytes)
    # If the crop covers (nearly) the whole image, the random crop failed.
    crop_failed = at_least_x_are_equal(original_shape, tf.shape(image), 3)
    return tf.cond(
        crop_failed,
        lambda: decode_and_center_crop(image_bytes, image_size, interpolation),
        lambda: resize(image, image_size, interpolation))
def decode_and_center_crop(image_bytes, image_size, interpolation):
    """Crops to center of image with padding then scales image_size."""
    shape = tf.io.extract_jpeg_shape(image_bytes)
    image_height = shape[0]
    image_width = shape[1]
    shorter_side = tf.cast(tf.minimum(image_height, image_width), tf.float32)
    padded_center_crop_size = tf.cast(
        (image_size / (image_size + CROP_PADDING)) * shorter_side, tf.int32)
    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
    crop_window = tf.stack(
        [offset_height, offset_width, padded_center_crop_size, padded_center_crop_size])
    image = tf.io.decode_and_crop_jpeg(image_bytes, crop_window)
    return resize(image, image_size, interpolation)
def normalize_image(image, mean=MEAN_RGB, std=STDDEV_RGB):
    """Subtract per-channel mean and divide by per-channel std."""
    offset = tf.constant(mean, shape=[1, 1, 3], dtype=image.dtype)
    scale = tf.constant(std, shape=[1, 1, 3], dtype=image.dtype)
    return (image - offset) / scale
def preprocess_for_train(image_bytes, dtype=tf.float32, image_size=IMAGE_SIZE,
                         mean=MEAN_RGB, std=STDDEV_RGB,
                         interpolation=tf.image.ResizeMethod.BICUBIC,
                         augment_name=None, randaug_num_layers=None,
                         randaug_magnitude=None):
    """Preprocesses the given image for training.

    Args:
      image_bytes: `Tensor` representing an image binary of arbitrary size.
      dtype: data type of the output image.
      image_size: target image size.
      mean: per-channel normalization mean.
      std: per-channel normalization std.
      interpolation: resize method.
      augment_name: optional augmentation policy name ('randaugment').
      randaug_num_layers: number of RandAugment layers.
      randaug_magnitude: RandAugment magnitude.

    Returns:
      A preprocessed image `Tensor`.
    """
    image = decode_and_random_crop(image_bytes, image_size, interpolation)
    image = tf.image.random_flip_left_right(image)
    image = tf.reshape(image, [image_size, image_size, 3])
    if augment_name:
        logging.info('Apply AutoAugment policy %s', augment_name)
        # Geometric ops fill exposed pixels with the dataset mean colour.
        fill_value = [int(round(v)) for v in MEAN_RGB]
        image = to_uint8(image, saturate=False)
        if augment_name == 'randaugment':
            image = distort_image_with_randaugment(
                image, randaug_num_layers, randaug_magnitude, fill_value=fill_value)
        else:
            raise ValueError('Invalid value for augment_name: %s' % augment_name)
        image = to_float(image)
    image = normalize_image(image, mean=mean, std=std)
    return tf.image.convert_image_dtype(image, dtype=dtype)
def preprocess_for_eval(image_bytes, dtype=tf.float32, image_size=IMAGE_SIZE,
                        mean=MEAN_RGB, std=STDDEV_RGB,
                        interpolation=tf.image.ResizeMethod.BICUBIC):
    """Preprocesses the given image for evaluation.

    Args:
      image_bytes: `Tensor` representing an image binary of arbitrary size.
      dtype: data type of the output image.
      image_size: target image size.
      mean: per-channel normalization mean.
      std: per-channel normalization std.
      interpolation: resize method.

    Returns:
      A preprocessed image `Tensor`.
    """
    image = decode_and_center_crop(image_bytes, image_size, interpolation)
    image = tf.reshape(image, [image_size, image_size, 3])
    image = normalize_image(image, mean=mean, std=std)
    return tf.image.convert_image_dtype(image, dtype=dtype)
def create_split(dataset_builder: tfds.core.DatasetBuilder, batch_size: int,
                 train: bool = True, half_precision: bool = False,
                 image_size: int = IMAGE_SIZE,
                 mean: Optional[Tuple[float]] = None,
                 std: Optional[Tuple[float]] = None,
                 interpolation: str = 'bicubic',
                 augment_name: Optional[str] = None,
                 randaug_num_layers: Optional[int] = None,
                 randaug_magnitude: Optional[int] = None,
                 cache: bool = False, no_repeat: bool = False):
    """Creates a split from the ImageNet dataset using TensorFlow Datasets.

    Args:
      dataset_builder: TFDS dataset builder for ImageNet.
      batch_size: the batch size returned by the data pipeline.
      train: Whether to load the train or evaluation split.
      half_precision: convert image datatype to half-precision
      image_size: The target size of the images (default: 224).
      mean: image dataset mean
      std: image dataset std-dev
      interpolation: interpolation method to use for image resize (default: 'bicubic')
      augment_name: optional augmentation policy name ('randaugment').
      randaug_num_layers: number of RandAugment layers.
      randaug_magnitude: RandAugment magnitude.
      cache: Whether to cache the dataset (default: False).
      no_repeat: disable repeat iter for evaluation
    Returns:
      A `tf.data.Dataset`.
    """
    mean = mean or MEAN_RGB
    std = std or STDDEV_RGB
    interpolation = (tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic'
                     else tf.image.ResizeMethod.BILINEAR)
    platform = jax.local_devices()[0].platform
    if half_precision:
        input_dtype = tf.bfloat16 if platform == 'tpu' else tf.float16
    else:
        input_dtype = tf.float32

    if train:
        data_size = dataset_builder.info.splits['train'].num_examples
        split = 'train'
    else:
        data_size = dataset_builder.info.splits['validation'].num_examples
        split = 'validation'
    # Each host reads a contiguous, equally-sized slice of the split.
    split_size = data_size // jax.host_count()
    start = jax.host_id() * split_size
    split = split + '[{}:{}]'.format(start, start + split_size)

    def _decode_example(example):
        if train:
            image = preprocess_for_train(
                example['image'], input_dtype, image_size, mean, std,
                interpolation, augment_name=augment_name,
                randaug_num_layers=randaug_num_layers,
                randaug_magnitude=randaug_magnitude)
        else:
            image = preprocess_for_eval(
                example['image'], input_dtype, image_size, mean, std, interpolation)
        return {'image': image, 'label': example['label']}

    ds = dataset_builder.as_dataset(
        split=split, decoders={'image': tfds.decode.SkipDecoding()})
    # BUG FIX: `ds.options().attr = ...` mutates a snapshot that is silently
    # discarded in TF2; options must be applied with `with_options`.
    options = tf.data.Options()
    options.experimental_threading.private_threadpool_size = 16
    options.experimental_threading.max_intra_op_parallelism = 1
    ds = ds.with_options(options)
    if cache:
        ds = ds.cache()
    if train:
        ds = ds.repeat()
        ds = ds.shuffle(16 * batch_size, seed=0)
    ds = ds.map(_decode_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.batch(batch_size, drop_remainder=True)
    if (not train) and (not no_repeat):
        ds = ds.repeat()
    ds = ds.prefetch(10)
    return ds
def random_apply(func, p, x):
    """Randomly apply function func to x with probability p."""
    coin = tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32)
    return tf.cond(
        tf.less(coin, tf.cast(p, tf.float32)),
        lambda: func(x),
        lambda: x)
def random_brightness(image, max_delta, impl='simclrv2'):
    """A multiplicative vs additive change of brightness."""
    if impl == 'simclrv2':
        # Multiplicative jitter: scale by a factor in [max(1 - d, 0), 1 + d].
        factor = tf.random.uniform([], tf.maximum(1.0 - max_delta, 0), 1.0 + max_delta)
        return image * factor
    if impl == 'simclrv1':
        # Additive jitter, as in the original SimCLR v1.
        return tf.image.random_brightness(image, max_delta=max_delta)
    raise ValueError('Unknown impl {} for random brightness.'.format(impl))
def to_grayscale(image, keep_channels=True):
    """Convert an RGB image to grayscale, optionally tiling back to 3 channels."""
    gray = tf.image.rgb_to_grayscale(image)
    if keep_channels:
        gray = tf.tile(gray, [1, 1, 3])
    return gray
def color_jitter(image, strength, random_order=True, impl='simclrv2'):
    """Distorts the color of the image.

    Args:
      image: The input image tensor.
      strength: the floating number for the strength of the color augmentation.
      random_order: A bool, specifying whether to randomize the jittering order.
      impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
        version of random brightness.

    Returns:
      The distorted image tensor.
    """
    brightness = 0.8 * strength
    contrast = 0.8 * strength
    saturation = 0.8 * strength
    hue = 0.2 * strength
    jitter_fn = color_jitter_rand if random_order else color_jitter_nonrand
    return jitter_fn(image, brightness, contrast, saturation, hue, impl=impl)
def color_jitter_nonrand(image, brightness=0, contrast=0, saturation=0, hue=0, impl='simclrv2'):
    """Distorts the color of the image (jittering order is fixed).

    Args:
      image: The input image tensor.
      brightness: A float, specifying the brightness for color jitter.
      contrast: A float, specifying the contrast for color jitter.
      saturation: A float, specifying the saturation for color jitter.
      hue: A float, specifying the hue for color jitter.
      impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
        version of random brightness.

    Returns:
      The distorted image tensor.
    """
    with tf.name_scope('distort_color'):

        def apply_transform(i, x, brightness, contrast, saturation, hue):
            """Apply the i-th transformation."""
            if (brightness != 0) and (i == 0):
                x = random_brightness(x, max_delta=brightness, impl=impl)
            elif (contrast != 0) and (i == 1):
                x = tf.image.random_contrast(x, lower=1 - contrast, upper=1 + contrast)
            elif (saturation != 0) and (i == 2):
                x = tf.image.random_saturation(x, lower=1 - saturation, upper=1 + saturation)
            elif (hue != 0) and (i == 3):
                # BUG FIX: the hue branch previously lacked the `i == 3` guard,
                # so hue jitter could run on any iteration whose own transform
                # had zero strength (up to 4 applications per image).
                x = tf.image.random_hue(x, max_delta=hue)
            return x

        for i in range(4):
            image = apply_transform(i, image, brightness, contrast, saturation, hue)
            image = tf.clip_by_value(image, 0.0, 1.0)
        return image
def color_jitter_rand(image, brightness=0, contrast=0, saturation=0, hue=0, impl='simclrv2'):
    """Distorts the color of the image (jittering order is random).

    Args:
      image: The input image tensor.
      brightness: A float, specifying the brightness for color jitter.
      contrast: A float, specifying the contrast for color jitter.
      saturation: A float, specifying the saturation for color jitter.
      hue: A float, specifying the hue for color jitter.
      impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
        version of random brightness.

    Returns:
      The distorted image tensor.
    """
    with tf.name_scope('distort_color'):

        def apply_transform(i, x):
            """Apply the i-th transformation."""
            def _brightness():
                return x if brightness == 0 else random_brightness(
                    x, max_delta=brightness, impl=impl)

            def _contrast():
                return x if contrast == 0 else tf.image.random_contrast(
                    x, lower=1 - contrast, upper=1 + contrast)

            def _saturation():
                return x if saturation == 0 else tf.image.random_saturation(
                    x, lower=1 - saturation, upper=1 + saturation)

            def _hue():
                return x if hue == 0 else tf.image.random_hue(x, max_delta=hue)

            # i is a tensor, so dispatch with nested tf.cond.
            return tf.cond(
                tf.less(i, 2),
                lambda: tf.cond(tf.less(i, 1), _brightness, _contrast),
                lambda: tf.cond(tf.less(i, 3), _saturation, _hue))

        perm = tf.random.shuffle(tf.range(4))
        for i in range(4):
            image = apply_transform(perm[i], image)
            image = tf.clip_by_value(image, 0.0, 1.0)
        return image
def _compute_crop_shape(image_height, image_width, aspect_ratio, crop_proportion):
    """Compute aspect ratio-preserving shape for central crop.

    The resulting shape retains `crop_proportion` along one side and a
    proportion less than or equal to `crop_proportion` along the other side.

    Args:
      image_height: Height of image to be cropped.
      image_width: Width of image to be cropped.
      aspect_ratio: Desired aspect ratio (width / height) of output.
      crop_proportion: Proportion of image to retain along the less-cropped side.

    Returns:
      crop_height: Height of image after cropping.
      crop_width: Width of image after cropping.
    """
    width_f = tf.cast(image_width, tf.float32)
    height_f = tf.cast(image_height, tf.float32)

    def _crop_when_ratio_wider_than_image():
        # Width is the limiting dimension.
        crop_height = tf.cast(tf.math.rint(crop_proportion / aspect_ratio * width_f), tf.int32)
        crop_width = tf.cast(tf.math.rint(crop_proportion * width_f), tf.int32)
        return crop_height, crop_width

    def _crop_when_image_wider_than_ratio():
        # Height is the limiting dimension.
        crop_height = tf.cast(tf.math.rint(crop_proportion * height_f), tf.int32)
        crop_width = tf.cast(tf.math.rint(crop_proportion * aspect_ratio * height_f), tf.int32)
        return crop_height, crop_width

    return tf.cond(
        aspect_ratio > width_f / height_f,
        _crop_when_ratio_wider_than_image,
        _crop_when_image_wider_than_ratio)
def center_crop(image, height, width, crop_proportion):
    """Crops to center of image and rescales to desired size.

    Args:
      image: Image Tensor to crop.
      height: Height of image to be cropped.
      width: Width of image to be cropped.
      crop_proportion: Proportion of image to retain along the less-cropped side.

    Returns:
      A `height` x `width` x channels Tensor holding a central crop of `image`.
    """
    shape = tf.shape(image)
    image_height = shape[0]
    image_width = shape[1]
    crop_height, crop_width = _compute_crop_shape(
        image_height, image_width, height / width, crop_proportion)
    offset_height = ((image_height - crop_height) + 1) // 2
    offset_width = ((image_width - crop_width) + 1) // 2
    image = tf.image.crop_to_bounding_box(
        image, offset_height, offset_width, crop_height, crop_width)
    return tf.image.resize([image], [height, width], method=tf.image.ResizeMethod.BICUBIC)[0]
def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None):
    """Generates cropped_image using one of the bboxes randomly distorted.

    See `tf.image.sample_distorted_bounding_box` for more documentation.

    Args:
      image: `Tensor` of image data.
      bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
        where each coordinate is [0, 1) and the coordinates are arranged
        as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
        image.
      min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
        area of the image must contain at least this fraction of any bounding
        box supplied.
      aspect_ratio_range: An optional list of `float`s. The cropped area of the
        image must have an aspect ratio = width / height within this range.
      area_range: An optional list of `float`s. The cropped area of the image
        must contain a fraction of the supplied image within this range.
      max_attempts: An optional `int`. Number of attempts at generating a
        cropped region of the image of the specified constraints. After
        `max_attempts` failures, return the entire image.
      scope: Optional `str` for name scope.

    Returns:
      The cropped image `Tensor`.
    """
    with tf.name_scope((scope or 'distorted_bounding_box_crop')):
        shape = tf.shape(image)
        sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True)
        (bbox_begin, bbox_size, _) = sample_distorted_bounding_box
        # Convert the sampled box into crop_to_bounding_box arguments.
        (offset_y, offset_x, _) = tf.unstack(bbox_begin)
        (target_height, target_width, _) = tf.unstack(bbox_size)
        image = tf.image.crop_to_bounding_box(image, offset_y, offset_x, target_height, target_width)
        return image
def crop_and_resize(image, height, width):
    """Make a random crop and resize it to height `height` and width `width`.

    Args:
      image: Tensor representing the image.
      height: Desired image height.
      width: Desired image width.

    Returns:
      A `height` x `width` x channels Tensor holding a random crop of `image`.
    """
    # Whole-image bbox: the random crop is sampled relative to the full frame.
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
    aspect_ratio = (width / height)
    # Inception-style crop: 8%-100% area, 3/4..4/3 aspect-ratio jitter.
    image = distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(((3.0 / 4) * aspect_ratio), ((4.0 / 3.0) * aspect_ratio)), area_range=(0.08, 1.0), max_attempts=100, scope=None)
    return tf.image.resize([image], [height, width], method=tf.image.ResizeMethod.BICUBIC)[0]
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
    """Blurs the given image with separable convolution.

    Args:
      image: Tensor of shape [height, width, channels] (or batched with a
        leading dim) and dtype float to blur.
      kernel_size: Integer Tensor for the size of the blur kernel. This should
        be an odd number. If it is an even number, the actual kernel size will
        be size + 1.
      sigma: Sigma value for gaussian operator.
      padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.

    Returns:
      A Tensor representing the blurred image.
    """
    radius = tf.cast((kernel_size / 2), dtype=tf.int32)
    kernel_size = ((radius * 2) + 1)  # force an odd effective kernel size
    x = tf.cast(tf.range((- radius), (radius + 1)), dtype=tf.float32)
    # Unnormalized Gaussian over [-radius, radius], then normalize to sum 1.
    blur_filter = tf.exp(((- tf.pow(x, 2.0)) / (2.0 * tf.pow(tf.cast(sigma, dtype=tf.float32), 2.0))))
    blur_filter /= tf.reduce_sum(blur_filter)
    # Separable blur: one vertical and one horizontal 1-D depthwise kernel.
    blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
    blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
    num_channels = tf.shape(image)[(- 1)]
    blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
    blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
    # depthwise_conv2d needs NHWC; add a batch dim for unbatched input.
    expand_batch_dim = (image.shape.ndims == 3)
    if expand_batch_dim:
        image = tf.expand_dims(image, axis=0)
    blurred = tf.nn.depthwise_conv2d(image, blur_h, strides=[1, 1, 1, 1], padding=padding)
    blurred = tf.nn.depthwise_conv2d(blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
    if expand_batch_dim:
        blurred = tf.squeeze(blurred, axis=0)
    return blurred
def random_crop_with_resize(image, height, width, p=1.0):
    """Randomly crop and resize an image.

    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      p: Probability of applying this transformation.

    Returns:
      A preprocessed image `Tensor`.
    """
    # With probability `p`, take a random crop and resize it to the target.
    return random_apply(lambda img: crop_and_resize(img, height, width), p=p, x=image)
def random_color_jitter(image, p=1.0, impl='simclrv2'):
    """Randomly applies color jitter then grayscale conversion to `image`.

    The combined transform is applied with probability `p`; within it, color
    jitter is applied with probability 0.8 (strength taken from
    FLAGS.color_jitter_strength) and grayscale with probability 0.2.
    `impl` selects the 'simclrv1'/'simclrv2' random-brightness variant.
    """
    def _transform(image):
        color_jitter_t = functools.partial(color_jitter, strength=FLAGS.color_jitter_strength, impl=impl)
        image = random_apply(color_jitter_t, p=0.8, x=image)
        return random_apply(to_grayscale, p=0.2, x=image)
    return random_apply(_transform, p=p, x=image)
def random_blur(image, height, width, p=1.0):
    """Randomly blur an image.

    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image; also sets the blur kernel size.
      width: Width of output image (unused).
      p: probability of applying this transformation.

    Returns:
      A preprocessed image `Tensor`.
    """
    del width

    def _transform(image):
        # Random sigma in [0.1, 2.0); kernel spans ~10% of the image height.
        sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
        return gaussian_blur(image, kernel_size=(height // 10), sigma=sigma, padding='SAME')
    return random_apply(_transform, p=p, x=image)
def batch_random_blur(images_list, height, width, blur_probability=0.5):
    """Apply efficient batch data transformations.

    Blurs every image in each batch, then keeps the blurred version for a
    random per-image subset (probability `blur_probability`) and the original
    for the rest.

    Args:
      images_list: a list of batched image tensors.
      height: the height of image.
      width: the width of image.
      blur_probability: the probability to apply the blur operator.

    Returns:
      Preprocessed feature list.
    """
    def _blur_mask(batch_size):
        # Per-image coin flip, broadcastable over H, W, C.
        coins = tf.random.uniform([batch_size, 1, 1, 1], 0, 1, dtype=tf.float32)
        return tf.cast(tf.less(coins, blur_probability), tf.float32)

    outputs = []
    for batch in images_list:
        blurred = random_blur(batch, height, width, p=1.0)
        keep_blur = _blur_mask(tf.shape(batch)[0])
        mixed = ((blurred * keep_blur) + (batch * (1 - keep_blur)))
        outputs.append(tf.clip_by_value(mixed, 0.0, 1.0))
    return outputs
def preprocess_for_train(image, height, width, color_distort=True, crop=True, flip=True, impl='simclrv2'):
    """Preprocesses the given image for training.

    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      color_distort: Whether to apply the color distortion.
      crop: Whether to crop the image.
      flip: Whether or not to flip left and right of an image.
      impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
        version of random brightness.

    Returns:
      A preprocessed image `Tensor`.
    """
    if crop:
        image = random_crop_with_resize(image, height, width)
    if flip:
        image = tf.image.random_flip_left_right(image)
    if color_distort:
        image = random_color_jitter(image, impl=impl)
    # Re-assert the static shape (lost by dynamic crops) and clamp to [0, 1].
    image = tf.reshape(image, [height, width, 3])
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image
def preprocess_for_eval(image, height, width, crop=True):
    """Preprocesses the given image for evaluation.

    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      crop: Whether or not to (center) crop the test images.

    Returns:
      A preprocessed image `Tensor`.
    """
    if crop:
        image = center_crop(image, height, width, crop_proportion=CROP_PROPORTION)
    # Re-assert the static shape and clamp to the valid [0, 1] range.
    reshaped = tf.reshape(image, [height, width, 3])
    return tf.clip_by_value(reshaped, 0.0, 1.0)
def preprocess_image(image, height, width, is_training=False, color_distort=True, test_crop=True):
    """Preprocesses the given image.

    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      is_training: `bool` for whether the preprocessing is for training.
      color_distort: whether to apply the color distortion.
      test_crop: whether or not to extract a central crop of the images
        (as for standard ImageNet evaluation) during the evaluation.

    Returns:
      A preprocessed image `Tensor` of range [0, 1].
    """
    # Normalize to float32 in [0, 1] before any augmentation.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    if not is_training:
        return preprocess_for_eval(image, height, width, test_crop)
    return preprocess_for_train(image, height, width, color_distort)
def create_conv(features, kernel_size, conv_layer=None, **kwargs):
    """Select a convolution implementation based on arguments.

    Creates and returns either a plain conv (via `conv_layer`, defaulting to
    `conv2d`) or a `MixedConv` when `kernel_size` is a list of sizes.
    `depthwise=True` makes the plain conv fully grouped (groups == features).
    """
    layer = conv2d if conv_layer is None else conv_layer
    if isinstance(kernel_size, list):
        # MixedConv supports neither expert routing nor explicit groups.
        assert 'num_experts' not in kwargs
        assert 'groups' not in kwargs
        return MixedConv(features, kernel_size, conv_layer=layer, **kwargs)
    is_depthwise = kwargs.pop('depthwise', False)
    group_count = features if is_depthwise else kwargs.pop('groups', 1)
    return layer(features, kernel_size, groups=group_count, **kwargs)
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation channel-gating block.

    Globally average-pools the input (in float32 for stability), reduces the
    channel count, applies an activation, expands back to `num_features`, and
    gates the input elementwise with `gate_fn` of the result.

    Attributes:
      num_features: number of channels being gated (expand conv output).
      block_features: optional surrounding-block channel count used as the
        reduction base when `reduce_from_block` is True.
      se_ratio: reduction ratio for the squeeze conv.
      divisor: reduced channel count is rounded to a multiple of this.
      reduce_from_block: reduce from `block_features` (when set) instead of
        `num_features`.
      dtype: computation dtype.
      conv_layer: constructor for the 1x1 reduce/expand convs.
      act_fn: activation after the reduce conv.
      bound_act_fn: optional activation that overrides `act_fn` when set.
      gate_fn: gating activation applied to the expand conv output.
    """
    num_features: int
    block_features: int = None
    se_ratio: float = 0.25
    divisor: int = 1
    reduce_from_block: bool = True
    dtype: Dtype = jnp.float32
    conv_layer: ModuleDef = conv2d
    act_fn: Callable = nn.relu
    bound_act_fn: Optional[Callable] = None
    gate_fn: Callable = nn.sigmoid

    @nn.compact
    def __call__(self, x):
        # Squeeze: global average pool, computed in float32 then cast back.
        x_se = jnp.asarray(x, jnp.float32)
        x_se = x_se.mean((1, 2), keepdims=True)
        x_se = jnp.asarray(x_se, self.dtype)
        base_features = (self.block_features if (self.block_features and self.reduce_from_block) else self.num_features)
        reduce_features: int = make_divisible((base_features * self.se_ratio), self.divisor)
        act_fn = (self.bound_act_fn if (self.bound_act_fn is not None) else self.act_fn)
        x_se = self.conv_layer(reduce_features, 1, stride=1, bias=True, name='reduce')(x_se)
        x_se = act_fn(x_se)
        x_se = self.conv_layer(self.num_features, 1, stride=1, bias=True, name='expand')(x_se)
        # Excite: per-channel gate broadcast over the spatial dims.
        return (x * self.gate_fn(x_se))
class ConvBnAct(nn.Module):
    """Convolution + normalization + activation block.

    Attributes:
      out_features: number of output channels.
      in_features: input channel count; not used in __call__ (flax infers it),
        kept for config-driven construction.
      kernel_size: conv kernel size.
      stride: conv stride.
      dilation: conv dilation.
      pad_type: padding mode ('LIKE' computes SAME-style explicit padding).
      conv_layer: conv constructor.
      norm_layer: normalization constructor.
      act_fn: activation function.
    """
    out_features: int
    in_features: int = None
    kernel_size: int = 3
    stride: int = 1
    dilation: int = 1
    pad_type: str = 'LIKE'
    conv_layer: ModuleDef = conv2d
    norm_layer: ModuleDef = batchnorm2d
    act_fn: Callable = nn.relu

    @nn.compact
    def __call__(self, x, training: bool):
        x = self.conv_layer(self.out_features, self.kernel_size, stride=self.stride, dilation=self.dilation, padding=self.pad_type, name='conv')(x)
        x = self.norm_layer(name='bn')(x, training=training)
        x = self.act_fn(x)
        return x
class DepthwiseSeparable(nn.Module):
    """DepthwiseSeparable block.

    Used for DS convs in MobileNet-V1 and in the place of IR blocks that have
    no expansion (factor of 1.0). This is an alternative to having an IR with
    an optional first pw conv.

    Structure: depthwise conv -> BN -> act -> (optional SE) -> pointwise conv
    -> BN -> (optional act) -> optional residual with DropPath.
    """
    in_features: int
    out_features: int
    dw_kernel_size: int = 3
    pw_kernel_size: int = 1
    stride: int = 1
    dilation: int = 1
    pad_type: str = 'LIKE'
    noskip: bool = False        # disable the residual even when shapes match
    pw_act: bool = False        # apply act_fn after the pointwise conv too
    se_ratio: float = 0.0       # 0 disables squeeze-and-excitation
    drop_path_rate: float = 0.0
    conv_layer: ModuleDef = conv2d
    norm_layer: ModuleDef = batchnorm2d
    se_layer: ModuleDef = SqueezeExcite
    act_fn: Callable = nn.relu

    @nn.compact
    def __call__(self, x, training: bool):
        shortcut = x
        # Depthwise conv at the block's stride.
        x = create_conv(self.in_features, self.dw_kernel_size, stride=self.stride, dilation=self.dilation, padding=self.pad_type, depthwise=True, conv_layer=self.conv_layer, name='conv_dw')(x)
        x = self.norm_layer(name='bn_dw')(x, training=training)
        x = self.act_fn(x)
        if ((self.se_layer is not None) and (self.se_ratio > 0)):
            x = self.se_layer(num_features=self.in_features, se_ratio=self.se_ratio, conv_layer=self.conv_layer, act_fn=self.act_fn, name='se')(x)
        # Pointwise projection; activation only when pw_act is set.
        x = create_conv(self.out_features, self.pw_kernel_size, padding=self.pad_type, conv_layer=self.conv_layer, name='conv_pw')(x)
        x = self.norm_layer(name='bn_pw')(x, training=training)
        if self.pw_act:
            x = self.act_fn(x)
        # Residual with stochastic depth when shapes allow and skip is enabled.
        if (((self.stride == 1) and (self.in_features == self.out_features)) and (not self.noskip)):
            x = DropPath(self.drop_path_rate)(x, training=training)
            x = (x + shortcut)
        return x
class InvertedResidual(nn.Module):
    """Inverted residual block w/ optional SE and CondConv routing.

    Structure: optional pointwise expansion -> depthwise conv -> optional SE
    -> pointwise-linear projection -> optional residual with DropPath.
    """
    in_features: int
    out_features: int
    exp_kernel_size: int = 1
    dw_kernel_size: int = 3
    pw_kernel_size: int = 1
    stride: int = 1
    dilation: int = 1
    pad_type: str = 'LIKE'
    noskip: bool = False        # disable the residual even when shapes match
    exp_ratio: float = 1.0      # channel expansion factor; 1.0 skips expansion
    se_ratio: float = 0.0       # 0 disables squeeze-and-excitation
    drop_path_rate: float = 0.0
    conv_layer: ModuleDef = conv2d
    norm_layer: ModuleDef = batchnorm2d
    se_layer: ModuleDef = SqueezeExcite
    act_fn: Callable = nn.relu

    @nn.compact
    def __call__(self, x, training: bool):
        shortcut = x
        features = make_divisible((self.in_features * self.exp_ratio))
        # Pointwise expansion (skipped when exp_ratio == 1).
        if (self.exp_ratio > 1.0):
            x = create_conv(features, self.exp_kernel_size, padding=self.pad_type, conv_layer=self.conv_layer, name='conv_exp')(x)
            x = self.norm_layer(name='bn_exp')(x, training=training)
            x = self.act_fn(x)
        # Depthwise conv at the block's stride.
        x = create_conv(features, self.dw_kernel_size, stride=self.stride, dilation=self.dilation, padding=self.pad_type, depthwise=True, conv_layer=self.conv_layer, name='conv_dw')(x)
        x = self.norm_layer(name='bn_dw')(x, training=training)
        x = self.act_fn(x)
        # SE on the expanded features; reduction base is the un-expanded width.
        if ((self.se_layer is not None) and (self.se_ratio > 0)):
            x = self.se_layer(num_features=features, block_features=self.in_features, se_ratio=self.se_ratio, conv_layer=self.conv_layer, act_fn=self.act_fn, name='se')(x)
        # Pointwise-linear projection (no activation afterwards).
        x = create_conv(self.out_features, self.pw_kernel_size, padding=self.pad_type, conv_layer=self.conv_layer, name='conv_pwl')(x)
        x = self.norm_layer(name='bn_pwl')(x, training=training)
        # Residual with stochastic depth when shapes allow and skip is enabled.
        if (((self.stride == 1) and (self.in_features == self.out_features)) and (not self.noskip)):
            x = DropPath(self.drop_path_rate)(x, training=training)
            x = (x + shortcut)
        return x
class EdgeResidual(nn.Module):
    """Residual block with expansion convolution followed by pointwise-linear w/ stride.

    Used by EfficientNet-EdgeTPU. Unlike InvertedResidual, the expansion conv
    is a full (non-depthwise) conv, there is no depthwise stage, and the
    stride sits on the projection conv.
    """
    in_features: int
    out_features: int
    exp_kernel_size: int = 1
    dw_kernel_size: int = 3     # unused here; kept for arg-compat with IR blocks
    pw_kernel_size: int = 1
    stride: int = 1
    dilation: int = 1
    pad_type: str = 'LIKE'
    noskip: bool = False
    exp_ratio: float = 1.0
    se_ratio: float = 0.0
    drop_path_rate: float = 0.0
    conv_layer: ModuleDef = conv2d
    norm_layer: ModuleDef = batchnorm2d
    se_layer: ModuleDef = SqueezeExcite
    act_fn: Callable = nn.relu

    @nn.compact
    def __call__(self, x, training: bool):
        shortcut = x
        # Expansion width derives from the runtime input channels rather than
        # self.in_features (presumably to honor 'fake_in_chs' configs — TODO confirm).
        features = make_divisible((x.shape[(- 1)] * self.exp_ratio))
        x = create_conv(features, self.exp_kernel_size, padding=self.pad_type, conv_layer=self.conv_layer, name='conv_exp')(x)
        x = self.norm_layer(name='bn_exp')(x, training=training)
        x = self.act_fn(x)
        if ((self.se_layer is not None) and (self.se_ratio > 0)):
            x = self.se_layer(num_features=features, block_features=self.in_features, se_ratio=self.se_ratio, conv_layer=self.conv_layer, act_fn=self.act_fn, name='se')(x)
        # Pointwise-linear projection carries the stride in this block type.
        x = create_conv(self.out_features, self.pw_kernel_size, stride=self.stride, dilation=self.dilation, padding=self.pad_type, conv_layer=self.conv_layer, name='conv_pwl')(x)
        x = self.norm_layer(name='bn_pwl')(x, training=training)
        # Residual with stochastic depth when shapes allow and skip is enabled.
        if (((self.stride == 1) and (self.in_features == self.out_features)) and (not self.noskip)):
            x = DropPath(self.drop_path_rate)(x, training=training)
            x = (x + shortcut)
        return x
class Head(nn.Module):
    """Standard Head from EfficientNet, MixNet, MNasNet, MobileNetV2, etc.

    Structure: 1x1 conv -> BN -> act -> global average pool -> dropout ->
    linear classifier.

    Attributes:
      num_features: channels produced by the final pointwise conv.
      num_classes: classifier outputs; values <= 0 skip the linear layer.
      global_pool: pooling mode; only 'avg' triggers pooling here.
      drop_rate: dropout rate before the classifier.
      dtype: computation dtype (pooling itself is done in float32).
      conv_layer / norm_layer / linear_layer: layer constructors.
      act_fn: activation function.
    """
    num_features: int
    num_classes: int = 1000
    global_pool: str = 'avg'
    drop_rate: float = 0.0
    dtype: Dtype = jnp.float32
    conv_layer: ModuleDef = conv2d
    norm_layer: ModuleDef = batchnorm2d
    linear_layer: ModuleDef = linear
    act_fn: Callable = nn.relu

    @nn.compact
    def __call__(self, x, training: bool):
        x = self.conv_layer(self.num_features, 1, name='conv_pw')(x)
        x = self.norm_layer(name='bn')(x, training=training)
        x = self.act_fn(x)
        if (self.global_pool == 'avg'):
            # Pool in float32 for numerical stability, then cast back.
            x = jnp.asarray(x, jnp.float32)
            x = x.mean((1, 2))
            x = jnp.asarray(x, self.dtype)
        x = Dropout(rate=self.drop_rate)(x, training=training)
        if (self.num_classes > 0):
            x = self.linear_layer(self.num_classes, bias=True, name='classifier')(x)
        return x
class EfficientHead(nn.Module):
    """EfficientHead for MobileNetV3.

    Pools before the final pointwise conv (so the conv runs on a 1x1 map) and
    uses no normalization layer: global average pool -> 1x1 conv (+bias) ->
    act -> dropout -> linear classifier.

    NOTE(review): pooling uses keepdims=True and the spatial 1x1 dims are never
    squeezed here, so the output appears to keep them — confirm downstream
    callers expect that shape.
    """
    num_features: int
    num_classes: int = 1000
    global_pool: str = 'avg'
    drop_rate: float = 0.0
    dtype: Dtype = jnp.float32
    conv_layer: ModuleDef = conv2d
    norm_layer: ModuleDef = None   # unused; kept for signature parity with Head
    linear_layer: ModuleDef = linear
    act_fn: Callable = nn.relu

    @nn.compact
    def __call__(self, x, training: bool):
        if (self.global_pool == 'avg'):
            # Pool in float32 for numerical stability, then cast back.
            x = jnp.asarray(x, jnp.float32)
            x = x.mean((1, 2), keepdims=True)
            x = jnp.asarray(x, self.dtype)
        x = self.conv_layer(self.num_features, 1, bias=True, name='conv_pw')(x)
        x = self.act_fn(x)
        x = Dropout(rate=self.drop_rate)(x, training=training)
        if (self.num_classes > 0):
            x = self.linear_layer(self.num_classes, bias=True, name='classifier')(x)
        return x
def chan_to_features(kwargs):
    """Rename pytorch-style channel keys to flax-style feature keys, in place.

    'in_chs' becomes 'in_features' and 'out_chs' becomes 'out_features' when
    present (and not None); all other entries are left untouched. Returns the
    same (mutated) dict for convenience.
    """
    for src_key, dst_key in (('in_chs', 'in_features'), ('out_chs', 'out_features')):
        value = kwargs.pop(src_key, None)
        if value is not None:
            kwargs[dst_key] = value
    return kwargs
class BlockFactory():
    """Maps builder block-type names to constructed flax block modules.

    Each factory method converts pytorch-style 'in_chs'/'out_chs' args to the
    flax modules' 'in_features'/'out_features' and names the block
    'blocks_{stage_idx}_{block_idx}' to match the pretrained-weight key scheme.
    """

    @staticmethod
    def CondConv(stage_idx, block_idx, **block_args):
        assert False, 'Not currently impl'

    @staticmethod
    def InvertedResidual(stage_idx, block_idx, **block_args):
        block_args = chan_to_features(block_args)
        return InvertedResidual(**block_args, name=f'blocks_{stage_idx}_{block_idx}')

    @staticmethod
    def DepthwiseSeparable(stage_idx, block_idx, **block_args):
        block_args = chan_to_features(block_args)
        return DepthwiseSeparable(**block_args, name=f'blocks_{stage_idx}_{block_idx}')

    @staticmethod
    def EdgeResidual(stage_idx, block_idx, **block_args):
        block_args = chan_to_features(block_args)
        # FIX: pop with a default. The original `pop('fake_in_chs')` raised
        # KeyError for block defs that never set it; EdgeResidual ignores the
        # value anyway (its expansion derives from the runtime input channels).
        block_args.pop('fake_in_chs', None)
        return EdgeResidual(**block_args, name=f'blocks_{stage_idx}_{block_idx}')

    @staticmethod
    def ConvBnAct(stage_idx, block_idx, **block_args):
        # ConvBnAct supports neither residuals nor SE; drop those args.
        block_args.pop('drop_path_rate', None)
        block_args.pop('se_layer', None)
        block_args = chan_to_features(block_args)
        return ConvBnAct(**block_args, name=f'blocks_{stage_idx}_{block_idx}')

    @staticmethod
    def get_act_fn(act_fn: Union[(str, Callable)]):
        # Resolve string names to callables; pass callables through unchanged.
        return (get_act_fn(act_fn) if isinstance(act_fn, str) else act_fn)
class EfficientNet(nn.Module):
    """EfficientNet (and other MBConvNets).

    * EfficientNet B0-B8, L2
    * EfficientNet-EdgeTPU
    * EfficientNet-Lite
    * MixNet S, M, L, XL
    * MobileNetV3
    * MobileNetV2
    * MnasNet A1, B1, and small
    * FBNet C
    * Single-Path NAS Pixel1

    Structure: stem ConvBnAct -> stages of blocks built by EfficientNetBuilder
    from `block_defs` -> classifier head (Head or EfficientHead).
    """
    block_defs: Sequence[Sequence[Dict]]   # per-stage block argument dicts
    stem_size: int = 32
    feat_multiplier: float = 1.0           # width multiplier
    feat_divisor: int = 8
    feat_min: int = None
    fix_stem: bool = False                 # if True, stem width is not scaled
    pad_type: str = 'LIKE'
    output_stride: int = 32
    efficient_head: bool = False           # use EfficientHead (MobileNetV3 style)
    num_classes: int = 1000
    num_features: int = 1280
    global_pool: str = 'avg'
    default_cfg: Dict = None
    drop_rate: float = 0.0
    drop_path_rate: float = 0.0
    dtype: Dtype = jnp.float32
    conv_layer: ModuleDef = conv2d
    norm_layer: ModuleDef = batchnorm2d
    se_layer: ModuleDef = SqueezeExcite
    act_fn: Callable = nn.relu

    @nn.compact
    def __call__(self, x, training: bool):
        # Shared layer kwargs: dtype-bound conv/norm constructors + activation.
        lkwargs = dict(conv_layer=partial(self.conv_layer, dtype=self.dtype, kernel_init=effnet_normal()), norm_layer=partial(self.norm_layer, dtype=self.dtype), act_fn=self.act_fn)
        se_layer = partial(self.se_layer, dtype=self.dtype)
        linear_layer = partial(linear, dtype=self.dtype, kernel_init=effnet_uniform())
        # Stem; width is scaled by the feature multiplier unless fix_stem.
        stem_features = self.stem_size
        if (not self.fix_stem):
            stem_features = round_features(self.stem_size, self.feat_multiplier, self.feat_divisor, self.feat_min)
        x = ConvBnAct(out_features=stem_features, kernel_size=3, stride=2, pad_type=self.pad_type, **lkwargs, name='stem')(x, training=training)
        # Build and run the block stages.
        blocks = EfficientNetBuilder(stem_features, self.block_defs, BlockFactory(), feat_multiplier=self.feat_multiplier, feat_divisor=self.feat_divisor, feat_min=self.feat_min, output_stride=self.output_stride, pad_type=self.pad_type, se_layer=se_layer, **lkwargs, drop_path_rate=self.drop_path_rate)()
        for stage in blocks:
            for block in stage:
                x = block(x, training=training)
        # Classifier head.
        head_layer = (EfficientHead if self.efficient_head else Head)
        x = head_layer(num_features=self.num_features, num_classes=self.num_classes, drop_rate=self.drop_rate, **lkwargs, dtype=self.dtype, linear_layer=linear_layer, name='head')(x, training=training)
        return x
def _filter(state_dict): ' convert state dict keys from pytorch style origins to flax linen ' out = {} p_blocks = re.compile('blocks\\.(\\d)\\.(\\d)') p_bn_scale = re.compile('bn(\\w*)\\.weight') for (k, v) in state_dict.items(): k = p_blocks.sub('blocks_\\1_\\2', k) k = p_bn_scale.sub('bn\\1.scale', k) k = k.replace('running_mean', 'mean') k = k.replace('running_var', 'var') k = k.replace('.weight', '.kernel') out[k] = v return out
def create_model(variant, pretrained=False, rng=None, input_shape=None, dtype=jnp.float32, **kwargs):
    """Build an EfficientNet `variant`, initialize its variables, and
    optionally load pretrained weights.

    Args:
      variant: model variant name looked up via get_model_cfg.
      pretrained: if True, load weights from the variant's default_cfg URL.
      rng: optional PRNGKey; defaults to PRNGKey(0).
      input_shape: optional CHW input size; defaults to the variant's config.
      dtype: computation dtype for the model.
      **kwargs: overrides merged into the architecture args.

    Returns:
      (model, variables) tuple.
    """
    model_cfg = get_model_cfg(variant)
    model_args = model_cfg['arch_fn'](variant, **model_cfg['arch_cfg'])
    model_args.update(kwargs)
    # Resolve the SE layer from config unless the caller supplied one.
    se_args = model_args.pop('se_cfg', {})
    if ('se_layer' not in model_args):
        if ('bound_act_fn' in se_args):
            se_args['bound_act_fn'] = get_act_fn(se_args['bound_act_fn'])
        if ('gate_fn' in se_args):
            se_args['gate_fn'] = get_act_fn(se_args['gate_fn'])
        model_args['se_layer'] = partial(SqueezeExcite, **se_args)
    # Resolve the norm layer from config unless the caller supplied one.
    bn_args = model_args.pop('bn_cfg')
    if ('norm_layer' not in model_args):
        model_args['norm_layer'] = partial(batchnorm2d, **bn_args)
    model_args['act_fn'] = get_act_fn(model_args.pop('act_fn', 'relu'))
    model = EfficientNet(dtype=dtype, default_cfg=model_cfg['default_cfg'], **model_args)
    rng = (jax.random.PRNGKey(0) if (rng is None) else rng)
    (params_rng, dropout_rng) = jax.random.split(rng)
    # Config input_size is CHW; convert to a batch-1 NHWC shape for init.
    input_shape = (model_cfg['default_cfg']['input_size'] if (input_shape is None) else input_shape)
    input_shape = (1, input_shape[1], input_shape[2], input_shape[0])
    variables = model.init({'params': params_rng, 'dropout': dropout_rng}, jnp.ones(input_shape, dtype=dtype), training=False)
    if pretrained:
        variables = load_pretrained(variables, default_cfg=model.default_cfg, filter_fn=_filter)
    return (model, variables)
@struct.dataclass
class EmaState():
    """Exponential moving average of model variables.

    A decay of 0.0 disables tracking: `variables` stays None and updates are
    no-ops.
    """
    decay: float = struct.field(pytree_node=False, default=0.0)
    variables: flax.core.FrozenDict[(str, Any)] = None

    @staticmethod
    def create(decay, variables):
        """Initialize EMA state from `variables` (disabled when decay == 0)."""
        if (decay == 0.0):
            return EmaState()
        ema_variables = jax.tree_map((lambda x: x), variables)
        return EmaState(decay, ema_variables)

    def update(self, new_variables):
        """Return a new state: ema = decay * ema + (1 - decay) * new."""
        if (self.decay == 0.0):
            return self.replace(variables=None)
        new_ema_variables = jax.tree_multimap((lambda ema, p: ((ema * self.decay) + ((1.0 - self.decay) * p))), self.variables, new_variables)
        return self.replace(variables=new_ema_variables)
def load_pretrained(variables, url='', default_cfg=None, filter_fn=None):
    """Load a pytorch-origin checkpoint into a flax variables dict.

    Args:
      variables: flax variables dict with 'params' and 'batch_stats'.
      url: checkpoint URL; falls back to default_cfg['url'] when empty.
      default_cfg: model default config (must carry 'url' if url is empty).
      filter_fn: optional key-renaming function applied to the source dicts
        (e.g. _filter, converting pytorch names to flax linen names).

    Returns:
      dict(params=..., batch_stats=...) with matching entries replaced by the
      downloaded weights. Missing/unexpected keys are reported via print, not
      raised.
    """
    if (not url):
        assert ((default_cfg is not None) and default_cfg['url'])
        url = default_cfg['url']
    state_dict = load_state_dict_from_url(url, transpose=True)
    (source_params, source_state) = split_state_dict(state_dict)
    if (filter_fn is not None):
        # Rename source keys to the flax naming scheme.
        source_params = filter_fn(source_params)
        source_state = filter_fn(source_state)
    var_unfrozen = unfreeze(variables)
    missing_keys = []
    # --- copy trainable params ---
    flat_params = flatten_dict(var_unfrozen['params'])
    flat_param_keys = set()
    for (k, v) in flat_params.items():
        flat_k = '.'.join(k)
        if (flat_k in source_params):
            assert (flat_params[k].shape == v.shape)
            flat_params[k] = source_params[flat_k]
        else:
            missing_keys.append(flat_k)
        flat_param_keys.add(flat_k)
    unexpected_keys = list(set(source_params.keys()).difference(flat_param_keys))
    params = freeze(unflatten_dict(flat_params))
    # --- copy batch norm running stats ---
    flat_state = flatten_dict(var_unfrozen['batch_stats'])
    flat_state_keys = set()
    for (k, v) in flat_state.items():
        flat_k = '.'.join(k)
        if (flat_k in source_state):
            assert (flat_state[k].shape == v.shape)
            flat_state[k] = source_state[flat_k]
        else:
            missing_keys.append(flat_k)
        flat_state_keys.add(flat_k)
    unexpected_keys.extend(list(set(source_state.keys()).difference(flat_state_keys)))
    batch_stats = freeze(unflatten_dict(flat_state))
    if missing_keys:
        print(f' WARNING: {len(missing_keys)} keys missing while loading state_dict. {str(missing_keys)}')
    if unexpected_keys:
        print(f' WARNING: {len(unexpected_keys)} unexpected keys found while loading state_dict. {str(unexpected_keys)}')
    return dict(params=params, batch_stats=batch_stats)
def get_act_fn(name='relu', **kwargs):
    """Look up an activation function by (case-insensitive) name.

    Extra keyword arguments, when given, are bound onto the returned callable
    with functools.partial. Unknown names trip the assertion on _ACT_FN.
    """
    key = name.lower()
    assert key in _ACT_FN
    fn = _ACT_FN[key]
    return partial(fn, **kwargs) if kwargs else fn
def conv2d(features: int, kernel_size: int, stride: Optional[int]=None, padding: Union[(str, Tuple[(int, int)])]=0, dilation: Optional[int]=None, groups: int=1, bias: bool=False, dtype: Dtype=jnp.float32, precision: Any=None, name: Optional[str]=None, kernel_init: Callable[([PRNGKey, Shape, Dtype], Array)]=default_kernel_init, bias_init: Callable[([PRNGKey, Shape, Dtype], Array)]=initializers.zeros):
    """Wrap flax nn.Conv behind pytorch-like scalar arguments.

    padding may be an int/tuple (converted to explicit per-side padding), the
    string 'LIKE' (SAME-style explicit padding computed from kernel size,
    stride, and dilation), or any other string passed straight to nn.Conv
    (e.g. 'SAME'/'VALID').
    """
    stride = (stride or 1)
    dilation = (dilation or 1)
    if isinstance(padding, str):
        if (padding == 'LIKE'):
            padding = get_like_padding(kernel_size, stride, dilation)
            padding = to_tuple(padding, 2)
            padding = [padding, padding]
    else:
        # Numeric padding: expand to the [(top, bottom), (left, right)] form.
        padding = to_tuple(padding, 2)
        padding = [padding, padding]
    return nn.Conv(features=features, kernel_size=to_tuple(kernel_size, 2), strides=to_tuple(stride, 2), padding=padding, kernel_dilation=to_tuple(dilation, 2), feature_group_count=groups, use_bias=bias, dtype=dtype, precision=precision, name=name, kernel_init=kernel_init, bias_init=bias_init)
def linear(features: int, bias: bool=True, dtype: Dtype=jnp.float32, name: str=None, kernel_init: Callable[([PRNGKey, Shape, Dtype], Array)]=default_kernel_init, bias_init: Callable[([PRNGKey, Shape, Dtype], Array)]=initializers.zeros):
    """Thin wrapper over flax nn.Dense with this file's arg naming (bias vs use_bias)."""
    return nn.Dense(features=features, use_bias=bias, dtype=dtype, name=name, kernel_init=kernel_init, bias_init=bias_init)
def _split_channels(num_feat, num_groups): split = [(num_feat // num_groups) for _ in range(num_groups)] split[0] += (num_feat - sum(split)) return split
def _to_list(x): if isinstance(x, int): return [x] return x
class MixedConv(nn.Module):
    """Mixed Grouped Convolution.

    Splits the input channels into len(kernel_size) groups and applies a
    separate conv (each with its own kernel size) to every split, then
    concatenates the results along the channel axis.

    Based on MDConv and GroupedConv in MixNet impl:
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py
    """
    features: int
    kernel_size: Union[(List[int], int)] = 3
    dilation: int = 1
    stride: int = 1
    padding: Union[(str, Tuple[(int, int)])] = 0
    depthwise: bool = False
    bias: bool = False
    conv_layer: ModuleDef = conv2d

    @nn.compact
    def __call__(self, x):
        # NOTE(review): the zip below assumes kernel_size is a list here
        # (create_conv only routes list kernel sizes to MixedConv) — confirm.
        num_groups = len(_to_list(self.kernel_size))
        # jnp.split wants cumulative boundaries, not per-group sizes.
        in_splits = np.array(_split_channels(x.shape[(- 1)], num_groups)).cumsum()[:(- 1)]
        out_splits = _split_channels(self.features, num_groups)
        x_split = jnp.split(x, in_splits, axis=3)
        # One conv per channel group, each with its own kernel size.
        x_out = [self.conv_layer(feat, kernel_size=k, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=(feat if self.depthwise else 1), bias=self.bias, name=f'{idx}')(x_split[idx]) for (idx, (k, feat)) in enumerate(zip(self.kernel_size, out_splits))]
        x = jnp.concatenate(x_out, axis=3)
        return x
def _absolute_dims(rank, dims): return tuple([((rank + dim) if (dim < 0) else dim) for dim in dims])
class BatchNorm(nn.Module):
    """BatchNorm Module.

    NOTE: A BatchNorm layer similar to the Flax version, kept for numerical
    comparisons: here the batch variance is computed directly as
    mean((x - mean)**2) rather than E[x^2] - E[x]^2, and the cross-process
    (axis_name) reduction has been removed in this variation. Statistics are
    always computed and stored in float32; the output is cast to `dtype`.

    Attributes:
      axis: the feature or non-batch axis of the input.
      momentum: decay rate for the exponential moving average of the batch
        statistics.
      epsilon: a small float added to variance to avoid dividing by zero.
      dtype: the dtype of the computation (default: float32).
      use_bias: if True, bias (beta) is added.
      use_scale: if True, multiply by scale (gamma). When the next layer is
        linear (also e.g. nn.relu), this can be disabled since the scaling
        will be done by the next layer.
      bias_init: initializer for bias, by default, zero.
      scale_init: initializer for scale, by default, one.
    """
    axis: int = (- 1)
    momentum: float = 0.99
    epsilon: float = 1e-05
    dtype: Dtype = jnp.float32
    use_bias: bool = True
    use_scale: bool = True
    bias_init: Callable[([PRNGKey, Shape, Dtype], Array)] = initializers.zeros
    scale_init: Callable[([PRNGKey, Shape, Dtype], Array)] = initializers.ones

    @nn.compact
    def __call__(self, x, training: bool):
        """Normalizes the input using batch statistics.

        Args:
          x: the input to be normalized.
          training: if True compute batch stats (and update the running
            averages); otherwise use the stored running averages.

        Returns:
          Normalized inputs (the same shape as inputs).
        """
        x = jnp.asarray(x, jnp.float32)
        axis = (self.axis if isinstance(self.axis, tuple) else (self.axis,))
        axis = _absolute_dims(x.ndim, axis)
        feature_shape = tuple(((d if (i in axis) else 1) for (i, d) in enumerate(x.shape)))
        reduced_feature_shape = tuple((d for (i, d) in enumerate(x.shape) if (i in axis)))
        reduction_axis = tuple((i for i in range(x.ndim) if (i not in axis)))
        # During module init the running stats don't exist yet; skip EMA update.
        initializing = (not self.has_variable('batch_stats', 'mean'))
        ra_mean = self.variable('batch_stats', 'mean', (lambda s: jnp.zeros(s, jnp.float32)), reduced_feature_shape)
        ra_var = self.variable('batch_stats', 'var', (lambda s: jnp.ones(s, jnp.float32)), reduced_feature_shape)
        if (not training):
            (mean, var) = (ra_mean.value, ra_var.value)
        else:
            mean = jnp.mean(x, axis=reduction_axis, keepdims=False)
            # Direct variance: mean of squared deviations from the batch mean.
            var = jnp.mean(((x - mean) ** 2), axis=reduction_axis, keepdims=False)
            if (not initializing):
                ra_mean.value = ((self.momentum * ra_mean.value) + ((1 - self.momentum) * mean))
                ra_var.value = ((self.momentum * ra_var.value) + ((1 - self.momentum) * var))
        y = (x - mean.reshape(feature_shape))
        mul = lax.rsqrt((var + self.epsilon))
        if self.use_scale:
            scale = self.param('scale', self.scale_init, reduced_feature_shape).reshape(feature_shape)
            mul = (mul * scale)
        y = (y * mul)
        if self.use_bias:
            bias = self.param('bias', self.bias_init, reduced_feature_shape).reshape(feature_shape)
            y = (y + bias)
        return jnp.asarray(y, self.dtype)
class FlaxBatchNorm(nn.Module):
    """FlaxBatchNorm Module.

    NOTE: A copy of the official Flax BN layer, w/ diff-of-squares variance
    (E[x^2] - E[x]^2) and cross-process batch stats syncing via lax.pmean.

    Attributes:
      axis: the feature or non-batch axis of the input.
      momentum: decay rate for the exponential moving average of the batch
        statistics.
      epsilon: a small float added to variance to avoid dividing by zero.
      dtype: the dtype of the computation (default: float32).
      use_bias: if True, bias (beta) is added.
      use_scale: if True, multiply by scale (gamma). When the next layer is
        linear (also e.g. nn.relu), this can be disabled since the scaling
        will be done by the next layer.
      bias_init: initializer for bias, by default, zero.
      scale_init: initializer for scale, by default, one.
      axis_name: the axis name used to combine batch statistics from multiple
        devices. See `jax.pmap` for a description of axis names (default:
        None).
      axis_index_groups: groups of axis indices within that named axis
        representing subsets of devices to reduce over (default: None). For
        example, `[[0, 1], [2, 3]]` would independently batch-normalize over
        the examples on the first two and last two devices. See `jax.lax.psum`
        for more details.
    """
    axis: int = (- 1)
    momentum: float = 0.99
    epsilon: float = 1e-05
    dtype: Dtype = jnp.float32
    use_bias: bool = True
    use_scale: bool = True
    bias_init: Callable[([PRNGKey, Shape, Dtype], Array)] = initializers.zeros
    scale_init: Callable[([PRNGKey, Shape, Dtype], Array)] = initializers.ones
    axis_name: Optional[str] = None
    axis_index_groups: Any = None

    @nn.compact
    def __call__(self, x, training: bool):
        """Normalizes the input using batch statistics.

        Args:
          x: the input to be normalized.
          training: if True compute batch stats (and update the running
            averages); otherwise use the stored running averages.

        Returns:
          Normalized inputs (the same shape as inputs).
        """
        x = jnp.asarray(x, jnp.float32)
        axis = (self.axis if isinstance(self.axis, tuple) else (self.axis,))
        axis = _absolute_dims(x.ndim, axis)
        feature_shape = tuple(((d if (i in axis) else 1) for (i, d) in enumerate(x.shape)))
        reduced_feature_shape = tuple((d for (i, d) in enumerate(x.shape) if (i in axis)))
        reduction_axis = tuple((i for i in range(x.ndim) if (i not in axis)))
        # During module init the running stats don't exist yet; skip sync/EMA.
        initializing = (not self.has_variable('batch_stats', 'mean'))
        ra_mean = self.variable('batch_stats', 'mean', (lambda s: jnp.zeros(s, jnp.float32)), reduced_feature_shape)
        ra_var = self.variable('batch_stats', 'var', (lambda s: jnp.ones(s, jnp.float32)), reduced_feature_shape)
        if (not training):
            (mean, var) = (ra_mean.value, ra_var.value)
        else:
            mean = jnp.mean(x, axis=reduction_axis, keepdims=False)
            mean2 = jnp.mean(lax.square(x), axis=reduction_axis, keepdims=False)
            if ((self.axis_name is not None) and (not initializing)):
                # Sync E[x] and E[x^2] across devices with a single pmean.
                concatenated_mean = jnp.concatenate([mean, mean2])
                (mean, mean2) = jnp.split(lax.pmean(concatenated_mean, axis_name=self.axis_name, axis_index_groups=self.axis_index_groups), 2)
            # Diff-of-squares variance: E[x^2] - E[x]^2.
            var = (mean2 - lax.square(mean))
            if (not initializing):
                ra_mean.value = ((self.momentum * ra_mean.value) + ((1 - self.momentum) * mean))
                ra_var.value = ((self.momentum * ra_var.value) + ((1 - self.momentum) * var))
        y = (x - mean.reshape(feature_shape))
        mul = lax.rsqrt((var + self.epsilon))
        if self.use_scale:
            scale = self.param('scale', self.scale_init, reduced_feature_shape).reshape(feature_shape)
            mul = (mul * scale)
        y = (y * mul)
        if self.use_bias:
            bias = self.param('bias', self.bias_init, reduced_feature_shape).reshape(feature_shape)
            y = (y + bias)
        return jnp.asarray(y, self.dtype)
class L1BatchNorm(nn.Module):
    """L1 BatchNorm Module.

    Normalizes with an L1 statistic (mean absolute deviation scaled by
    sqrt(pi/2), which estimates the standard deviation for Gaussian inputs)
    instead of the variance.

    Attributes:
      axis: the feature or non-batch axis of the input.
      momentum: decay rate for the exponential moving average of the batch statistics.
      epsilon: a small float added to the scale statistic to avoid dividing by zero.
      dtype: the dtype of the computation (default: float32).
      use_bias: if True, bias (beta) is added.
      use_scale: if True, multiply by scale (gamma).
        When the next layer is linear (also e.g. nn.relu), this can be disabled
        since the scaling will be done by the next layer.
      bias_init: initializer for bias, by default, zero.
      scale_init: initializer for scale, by default, one.
      axis_name: axis name for cross-device statistics via `lax.pmean`
        (see `jax.lax.psum` for details); None disables syncing.
      axis_index_groups: groups of axis indices within that named axis to reduce over.
    """
    axis: int = -1
    momentum: float = 0.99
    epsilon: float = 1e-05
    dtype: Dtype = jnp.float32
    use_bias: bool = True
    use_scale: bool = True
    bias_init: Callable[([PRNGKey, Shape, Dtype], Array)] = initializers.zeros
    scale_init: Callable[([PRNGKey, Shape, Dtype], Array)] = initializers.ones
    # FIX: __call__ below reads self.axis_name / self.axis_index_groups, but the
    # class never declared them (the sibling BatchNorm class above does). On a
    # flax dataclass module that access fails with AttributeError. Declared here
    # with the same defaults as the sibling class.
    axis_name: Optional[str] = None
    axis_index_groups: Any = None

    @nn.compact
    def __call__(self, x, training: bool):
        """Normalizes the input using L1 batch statistics.

        Args:
          x: the input to be normalized.
          training: if True compute (and update) batch statistics, otherwise use
            the stored running averages.
        Returns:
          Normalized inputs (the same shape as inputs).
        """
        x = jnp.asarray(x, self.dtype)
        axis = self.axis if isinstance(self.axis, tuple) else (self.axis,)
        axis = _absolute_dims(x.ndim, axis)
        feature_shape = tuple(d if i in axis else 1 for i, d in enumerate(x.shape))
        reduced_feature_shape = tuple(d for i, d in enumerate(x.shape) if i in axis)
        reduction_axis = tuple(i for i in range(x.ndim) if i not in axis)
        # During module init the batch_stats collection does not exist yet.
        initializing = not self.has_variable('batch_stats', 'mean')
        ra_mean = self.variable('batch_stats', 'mean', lambda s: jnp.zeros(s, jnp.float32), reduced_feature_shape)
        ra_var = self.variable('batch_stats', 'var', lambda s: jnp.ones(s, jnp.float32), reduced_feature_shape)
        if not training:
            mean, var = ra_mean.value, ra_var.value
        else:
            mean = jnp.mean(x, axis=reduction_axis, keepdims=False)
            # L1 spread statistic: E|x - mean| * sqrt(pi/2) (std estimate for Gaussian x).
            var = jnp.mean(lax.abs(x - mean), axis=reduction_axis, keepdims=False) * jnp.sqrt(jnp.pi / 2)
            if self.axis_name is not None and not initializing:
                # One pmean over concatenated stats halves the number of collectives.
                concatenated_mean = jnp.concatenate([mean, var])
                mean, var = jnp.split(
                    lax.pmean(concatenated_mean, axis_name=self.axis_name,
                              axis_index_groups=self.axis_index_groups), 2)
            if not initializing:
                ra_mean.value = self.momentum * ra_mean.value + (1 - self.momentum) * mean
                ra_var.value = self.momentum * ra_var.value + (1 - self.momentum) * var
        mean = jnp.asarray(mean, self.dtype)
        var = jnp.asarray(var, self.dtype)
        y = x - mean.reshape(feature_shape)
        mul = lax.reciprocal(var + self.epsilon)
        if self.use_scale:
            scale = self.param('scale', self.scale_init, reduced_feature_shape).reshape(feature_shape)
            scale = jnp.asarray(scale, self.dtype)
            mul = mul * scale
        y = y * mul
        if self.use_bias:
            bias = self.param('bias', self.bias_init, reduced_feature_shape).reshape(feature_shape)
            bias = jnp.asarray(bias, self.dtype)
            y = y + bias
        return jnp.asarray(y, self.dtype)
def batchnorm2d(eps=0.001, momentum=0.99, affine=True, dtype: Dtype=jnp.float32,
                name: Optional[str]=None, variant: str='',
                bias_init: Callable[([PRNGKey, Shape, Dtype], Array)]=initializers.zeros,
                weight_init: Callable[([PRNGKey, Shape, Dtype], Array)]=initializers.ones):
    """Factory for a 2D batch-norm layer.

    `variant` selects the implementation: 'flax' -> FlaxBatchNorm, 'l1' ->
    L1BatchNorm, anything else (including the default '') -> BatchNorm.
    `affine` toggles both the learned scale and bias.
    """
    variants = {'flax': FlaxBatchNorm, 'l1': L1BatchNorm}
    layer_cls = variants.get(variant, BatchNorm)
    return layer_cls(momentum=momentum, epsilon=eps, use_bias=affine, use_scale=affine,
                     dtype=dtype, name=name, bias_init=bias_init, scale_init=weight_init)
class Dropout(nn.Module):
    """Dropout layer.

    Attributes:
      rate: the dropout probability. (_not_ the keep rate!)
    """
    rate: float

    @nn.compact
    def __call__(self, x, training: bool, rng: PRNGKey=None):
        """Applies a random dropout mask to the input.

        NOTE(review): the previous docstring had the `training` semantics
        inverted; per the code below, masking only happens when training.

        Args:
          x: the inputs that should be randomly masked.
          training: if True, elements are zeroed with probability `rate` and the
            survivors are rescaled by `1 / (1 - rate)`; if False, the inputs are
            returned as is.
          rng: an optional `jax.random.PRNGKey`. By default `self.make_rng('dropout')`
            will be used.
        Returns:
          The masked inputs reweighted to preserve mean.
        """
        # Fast path: nothing to drop, or inference mode.
        if ((self.rate == 0.0) or (not training)):
            return x
        keep_prob = (1.0 - self.rate)
        if (rng is None):
            rng = self.make_rng('dropout')
        # Boolean keep-mask; kept entries are rescaled so E[output] == E[input].
        mask = random.bernoulli(rng, p=keep_prob, shape=x.shape)
        return lax.select(mask, (x / keep_prob), jnp.zeros_like(x))
def drop_path(x: jnp.array, drop_rate: float=0.0, rng=None) -> jnp.array:
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    if drop_rate == 0.0:
        return x
    keep_prob = 1.0 - drop_rate
    if rng is None:
        # NOTE(review): assumes a module-level `make_rng` is in scope (old
        # flax-style rng plumbing) — confirm; the DropPath module in this file
        # always passes `rng` explicitly.
        rng = make_rng()
    # One Bernoulli draw per sample, broadcast over the remaining dims.
    sample_mask = random.bernoulli(key=rng, p=keep_prob, shape=(x.shape[0], 1, 1, 1))
    sample_mask = jnp.broadcast_to(sample_mask, x.shape)
    # Rescale survivors so the expected activation is unchanged.
    return lax.select(sample_mask, x / keep_prob, jnp.zeros_like(x))
class DropPath(nn.Module):
    """Stochastic-depth module: per-sample path dropping via `drop_path`."""
    rate: float = 0.0

    @nn.compact
    def __call__(self, x, training: bool, rng: PRNGKey=None):
        # Identity in inference mode or when the rate is zero.
        if self.rate == 0.0 or not training:
            return x
        rng = self.make_rng('dropout') if rng is None else rng
        return drop_path(x, self.rate, rng)
def create_conv(in_channels, out_channels, kernel_size, conv_layer=None, **kwargs):
    """Select a convolution implementation based on arguments.

    Creates and returns one of Conv, MixedConv, or CondConv (TODO).
    """
    layer_cls = conv_layer if conv_layer is not None else Conv2d
    if isinstance(kernel_size, list):
        # A list of kernel sizes selects MixedConv; experts/groups are not supported there.
        assert 'num_experts' not in kwargs
        assert 'groups' not in kwargs
        return MixedConv(in_channels, out_channels, kernel_size, conv_layer=layer_cls, **kwargs)
    # Depthwise implies one group per input channel.
    depthwise = kwargs.pop('depthwise', False)
    groups = in_channels if depthwise else kwargs.pop('groups', 1)
    return layer_cls(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
class SqueezeExcite(Module):
    """Squeeze-and-Excitation channel-attention module (global pool -> reduce -> expand -> gate)."""

    def __init__(self, in_chs, se_ratio=0.25, block_chs=None, reduce_from_block=True,
                 conv_layer=Conv2d, act_fn=F.relu, bound_act_fn=None, gate_fn=F.sigmoid, divisor=1):
        super(SqueezeExcite, self).__init__()
        # The reduction width may be derived from the enclosing block width instead of in_chs.
        base_chs = block_chs if (block_chs and reduce_from_block) else in_chs
        mid_chs = make_divisible(base_chs * se_ratio, divisor)
        self.reduce = conv_layer(in_chs, mid_chs, 1, bias=True)
        # An optional bounded activation overrides the default one.
        self.act_fn = bound_act_fn if bound_act_fn is not None else act_fn
        self.expand = conv_layer(mid_chs, in_chs, 1, bias=True)
        self.gate_fn = gate_fn

    def __call__(self, x):
        # Global average pool over spatial dims, then channel-wise gating.
        attn = x.mean((2, 3), keepdims=True)
        attn = self.act_fn(self.reduce(attn))
        attn = self.expand(attn)
        return x * self.gate_fn(attn)
class ConvBnAct(Module):
    """Convolution + batch-norm + activation: the standard conv building block."""

    def __init__(self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='LIKE',
                 conv_layer=Conv2d, norm_layer=BatchNorm2d, act_fn=F.relu):
        super(ConvBnAct, self).__init__()
        self.conv = conv_layer(in_chs, out_chs, kernel_size, stride=stride,
                               dilation=dilation, padding=pad_type)
        self.bn = norm_layer(out_chs)
        self.act_fn = act_fn

    def __call__(self, x, training: bool):
        # conv -> norm -> activation
        return self.act_fn(self.bn(self.conv(x), training=training))
class DepthwiseSeparable(Module):
    """DepthwiseSeparable block.

    Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no
    expansion (factor of 1.0). This is an alternative to having an IR with an
    optional first pw conv.
    """

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='LIKE',
                 noskip=False, pw_kernel_size=1, pw_act=False, se_ratio=0.0,
                 conv_layer=Conv2d, norm_layer=BatchNorm2d, se_layer=None, act_fn=F.relu,
                 drop_path_rate=0.0):
        super(DepthwiseSeparable, self).__init__()
        # Residual shortcut only when the shape is preserved and not explicitly disabled.
        self.has_residual = stride == 1 and in_chs == out_chs and not noskip
        self.has_pw_act = pw_act
        self.drop_path_rate = drop_path_rate
        # Depthwise conv.
        self.conv_dw = create_conv(in_chs, in_chs, dw_kernel_size, stride=stride,
                                   dilation=dilation, padding=pad_type, depthwise=True,
                                   conv_layer=conv_layer)
        self.bn_dw = norm_layer(in_chs)
        self.act_fn = act_fn
        # Optional squeeze-and-excitation between dw and pw convs.
        self.se = None
        if se_layer is not None and se_ratio > 0.0:
            self.se = se_layer(in_chs, se_ratio=se_ratio, act_fn=act_fn)
        # Pointwise projection.
        self.conv_pw = create_conv(in_chs, out_chs, pw_kernel_size, padding=pad_type,
                                   conv_layer=conv_layer)
        self.bn_pw = norm_layer(out_chs)

    def __call__(self, x, training: bool):
        shortcut = x
        x = self.act_fn(self.bn_dw(self.conv_dw(x), training=training))
        if self.se is not None:
            x = self.se(x)
        x = self.bn_pw(self.conv_pw(x), training=training)
        if self.has_pw_act:
            x = self.act_fn(x)
        if self.has_residual:
            if training:
                # Stochastic depth on the residual branch (training only).
                x = drop_path(x, drop_prob=self.drop_path_rate)
            x += shortcut
        return x
class InvertedResidual(Module):
    """Inverted residual block w/ optional SE and CondConv routing"""

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='LIKE',
                 noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, se_ratio=0.0,
                 conv_layer=Conv2d, norm_layer=BatchNorm2d, se_layer=None, act_fn=F.relu,
                 drop_path_rate=0.0):
        super(InvertedResidual, self).__init__()
        mid_chs = make_divisible(in_chs * exp_ratio)
        # Residual shortcut only when the shape is preserved and not explicitly disabled.
        self.has_residual = in_chs == out_chs and stride == 1 and not noskip
        self.drop_path_rate = drop_path_rate
        # Pointwise expansion.
        self.conv_exp = create_conv(in_chs, mid_chs, exp_kernel_size, padding=pad_type,
                                    conv_layer=conv_layer)
        self.bn_exp = norm_layer(mid_chs)
        self.act_fn = act_fn
        # Depthwise conv.
        self.conv_dw = create_conv(mid_chs, mid_chs, dw_kernel_size, stride=stride,
                                   dilation=dilation, padding=pad_type, depthwise=True,
                                   conv_layer=conv_layer)
        self.bn_dw = norm_layer(mid_chs)
        # Optional squeeze-and-excitation; reduction sized from the block input width.
        self.se = None
        if se_layer is not None and se_ratio > 0.0:
            self.se = se_layer(mid_chs, block_chs=in_chs, se_ratio=se_ratio, act_fn=act_fn)
        # Pointwise-linear projection (no activation).
        self.conv_pwl = create_conv(mid_chs, out_chs, pw_kernel_size, padding=pad_type,
                                    conv_layer=conv_layer)
        self.bn_pwl = norm_layer(out_chs)

    def __call__(self, x, training: bool):
        shortcut = x
        x = self.act_fn(self.bn_exp(self.conv_exp(x), training=training))
        x = self.act_fn(self.bn_dw(self.conv_dw(x), training=training))
        if self.se is not None:
            x = self.se(x)
        x = self.bn_pwl(self.conv_pwl(x), training=training)
        if self.has_residual:
            if training:
                # Stochastic depth on the residual branch (training only).
                x = drop_path(x, drop_prob=self.drop_path_rate)
            x += shortcut
        return x
class EdgeResidual(Module):
    """Residual block with expansion convolution followed by pointwise-linear w/ stride"""

    def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0,
                 stride=1, dilation=1, pad_type='LIKE', noskip=False, pw_kernel_size=1,
                 se_ratio=0.0, conv_layer=Conv2d, norm_layer=BatchNorm2d, se_layer=None,
                 act_fn=F.relu, drop_path_rate=0.0):
        super(EdgeResidual, self).__init__()
        # fake_in_chs lets the builder size the expansion from a different width than the real input.
        base_chs = fake_in_chs if fake_in_chs > 0 else in_chs
        mid_chs = make_divisible(base_chs * exp_ratio)
        self.has_residual = in_chs == out_chs and stride == 1 and not noskip
        self.drop_path_rate = drop_path_rate
        # Full (non-depthwise) expansion conv.
        self.conv_exp = create_conv(in_chs, mid_chs, exp_kernel_size, padding=pad_type,
                                    conv_layer=conv_layer)
        self.bn_exp = norm_layer(mid_chs)
        self.act_fn = act_fn
        # Optional squeeze-and-excitation; reduction sized from the block input width.
        self.se = None
        if se_layer is not None and se_ratio > 0.0:
            self.se = se_layer(mid_chs, block_chs=in_chs, se_ratio=se_ratio, act_fn=act_fn)
        # The stride lives on the pointwise-linear conv in edge blocks.
        self.conv_pwl = create_conv(mid_chs, out_chs, pw_kernel_size, stride=stride,
                                    dilation=dilation, padding=pad_type, conv_layer=conv_layer)
        self.bn_pwl = norm_layer(out_chs)

    def __call__(self, x, training: bool):
        shortcut = x
        x = self.act_fn(self.bn_exp(self.conv_exp(x), training=training))
        if self.se is not None:
            x = self.se(x)
        x = self.bn_pwl(self.conv_pwl(x), training=training)
        if self.has_residual:
            if training:
                # Stochastic depth on the residual branch (training only).
                x = drop_path(x, drop_prob=self.drop_path_rate)
            x = x + shortcut
        return x
class EfficientHead(Module):
    """EfficientHead from MobileNetV3: pool -> 1x1 conv -> act -> classifier.

    Note: `norm_layer` is accepted for interface compatibility with `Head`
    (both are constructed with the same kwargs) but is unused here.
    """

    def __init__(self, in_chs: int, num_features: int, num_classes: int=1000,
                 global_pool: str='avg', act_fn='relu', conv_layer=Conv2d, norm_layer=None):
        # FIX: super().__init__() was missing; every other Module subclass in this
        # file calls it (presumably required by the Module base — confirm).
        super(EfficientHead, self).__init__()
        self.global_pool = global_pool
        self.conv_pw = conv_layer(in_chs, num_features, 1, bias=True)
        # FIX: the default act_fn is the *string* 'relu', which is not callable.
        # Resolve names through the module-level registry, as BlockFactory does.
        self.act_fn = get_act_fn(act_fn) if isinstance(act_fn, str) else act_fn
        if num_classes > 0:
            self.classifier = Linear(num_features, num_classes, bias=True)
        else:
            self.classifier = None

    def __call__(self, x: JaxArray, training: bool) -> JaxArray:
        if self.global_pool == 'avg':
            x = x.mean((2, 3), keepdims=True)
        x = self.conv_pw(x).reshape(x.shape[0], -1)
        x = self.act_fn(x)
        # FIX: guard the num_classes == 0 configuration (classifier is None then),
        # matching the sibling `Head.__call__`.
        if self.classifier is not None:
            x = self.classifier(x)
        return x
class Head(Module):
    """Standard Head from EfficientNet, MixNet, MNasNet, MobileNetV2, etc.

    1x1 conv -> norm -> act -> global pool -> linear classifier.
    """

    def __init__(self, in_chs: int, num_features: int, num_classes: int=1000,
                 global_pool: str='avg', act_fn=F.relu, conv_layer=Conv2d, norm_layer=BatchNorm2d):
        # FIX: super().__init__() was missing; every other Module subclass in this
        # file calls it (presumably required by the Module base — confirm).
        super(Head, self).__init__()
        self.global_pool = global_pool
        self.conv_pw = conv_layer(in_chs, num_features, 1)
        self.bn = norm_layer(num_features)
        self.act_fn = act_fn
        if num_classes > 0:
            self.classifier = Linear(num_features, num_classes, bias=True)
        else:
            # num_classes == 0 -> feature-extraction only, no classifier.
            self.classifier = None

    def __call__(self, x: JaxArray, training: bool) -> JaxArray:
        x = self.conv_pw(x)
        x = self.bn(x, training=training)
        x = self.act_fn(x)
        if self.global_pool == 'avg':
            x = x.mean((2, 3))
        if self.classifier is not None:
            x = self.classifier(x)
        return x
class BlockFactory:
    """Maps block-type names to constructors; consumed by EfficientNetBuilder."""

    @staticmethod
    def CondConv(stage_idx, block_idx, **block_args):
        assert False, 'Not currently impl'

    @staticmethod
    def InvertedResidual(stage_idx, block_idx, **block_args):
        return InvertedResidual(**block_args)

    @staticmethod
    def DepthwiseSeparable(stage_idx, block_idx, **block_args):
        return DepthwiseSeparable(**block_args)

    @staticmethod
    def EdgeResidual(stage_idx, block_idx, **block_args):
        return EdgeResidual(**block_args)

    @staticmethod
    def ConvBnAct(stage_idx, block_idx, **block_args):
        # ConvBnAct supports neither drop-path nor SE; discard those args if present.
        for unused_key in ('drop_path_rate', 'se_layer'):
            block_args.pop(unused_key, None)
        return ConvBnAct(**block_args)

    @staticmethod
    def get_act_fn(act_fn: Union[(str, Callable)]):
        # Resolve string names via the module-level registry; pass callables through.
        if isinstance(act_fn, str):
            return get_act_fn(act_fn)
        return act_fn
class EfficientNet(Module):
    """EfficientNet (and other MBConvNets)

    * EfficientNet B0-B8, L2
    * EfficientNet-EdgeTPU
    * EfficientNet-Lite
    * MixNet S, M, L, XL
    * MobileNetV3
    * MobileNetV2
    * MnasNet A1, B1, and small
    * FBNet C
    * Single-Path NAS Pixel1

    Assembled as stem (ConvBnAct, stride 2) -> blocks (built from `block_defs`
    by EfficientNetBuilder) -> head (EfficientHead or Head).
    """

    def __init__(self, block_defs, num_classes: int=1000, num_features: int=1280,
                 drop_rate: float=0.0, global_pool: str='avg', feat_multiplier: float=1.0,
                 feat_divisor: int=8, feat_min: Optional[int]=None, in_chs: int=3,
                 stem_size: int=32, fix_stem: bool=False, output_stride: int=32,
                 efficient_head: bool=False, pad_type: str='LIKE', conv_layer=Conv2d,
                 norm_layer=BatchNorm2d, se_layer=SqueezeExcite, act_fn=F.relu,
                 drop_path_rate: float=0.0):
        super(EfficientNet, self).__init__()
        self.num_classes = num_classes
        self.num_features = num_features
        self.drop_rate = drop_rate
        # Shared conv/norm/activation configuration threaded through stem, blocks and head.
        cba_kwargs = dict(conv_layer=conv_layer, norm_layer=norm_layer, act_fn=act_fn)
        if (not fix_stem):
            # Scale the stem width with the feature multiplier unless explicitly fixed.
            stem_size = round_features(stem_size, feat_multiplier, feat_divisor, feat_min)
        self.stem = ConvBnAct(in_chs, stem_size, 3, stride=2, pad_type=pad_type, **cba_kwargs)
        builder = EfficientNetBuilder(stem_size, block_defs, BlockFactory(),
                                      feat_multiplier=feat_multiplier, feat_divisor=feat_divisor,
                                      feat_min=feat_min, output_stride=output_stride,
                                      pad_type=pad_type, se_layer=se_layer, **cba_kwargs,
                                      drop_path_rate=drop_path_rate)
        # builder() yields a list of stages, each a list of blocks; wrap both levels.
        self.blocks = nn.Sequential([nn.Sequential(b) for b in builder()])
        self.feature_info = builder.features
        head_chs = builder.in_chs
        head_layer = (EfficientHead if efficient_head else Head)
        self.head = head_layer(head_chs, self.num_features, self.num_classes,
                               global_pool=global_pool, **cba_kwargs)

    def get_classifier(self):
        # The final Linear layer (per Head/EfficientHead, None when num_classes == 0).
        return self.head.classifier

    def forward_features(self, x: JaxArray, training: bool) -> JaxArray:
        """Run stem + blocks only, returning the pre-head feature map."""
        x = self.stem(x, training=training)
        x = self.blocks(x, training=training)
        return x

    def __call__(self, x: JaxArray, training: bool) -> JaxArray:
        """Full forward pass: features, then the classification head."""
        x = self.forward_features(x, training=training)
        x = self.head(x, training=training)
        return x
def create_model(variant, pretrained=False, **kwargs):
    """Build an EfficientNet-family model from a registered variant config.

    `kwargs` override the architecture args; `pretrained` loads weights from
    the variant's default config URL.
    """
    cfg = get_model_cfg(variant)
    args = cfg['arch_fn'](variant, **cfg['arch_cfg'])
    args.update(kwargs)
    # Resolve the squeeze-excite layer unless the caller supplied one.
    se_cfg = args.pop('se_cfg', {})
    if 'se_layer' not in args:
        for fn_key in ('bound_act_fn', 'gate_fn'):
            if fn_key in se_cfg:
                se_cfg[fn_key] = get_act_fn(se_cfg[fn_key])
        args['se_layer'] = partial(SqueezeExcite, **se_cfg)
    # Resolve the norm layer unless the caller supplied one.
    bn_cfg = args.pop('bn_cfg')
    if 'norm_layer' not in args:
        args['norm_layer'] = partial(BatchNorm2d, **bn_cfg)
    args['act_fn'] = get_act_fn(args.pop('act_fn', 'relu'))
    model = EfficientNet(**args)
    model.default_cfg = cfg['default_cfg']
    if pretrained:
        load_pretrained(model, default_cfg=model.default_cfg)
    return model
def load_pretrained(model, url='', default_cfg=None, filter_fn=None):
    """Load pretrained weights into `model` from `url` (or the default config's url).

    `filter_fn`, if given, may remap/filter the downloaded state dict before assignment.
    """
    if not url:
        # Fall back to the default config; it must carry a non-empty url.
        assert default_cfg is not None and default_cfg['url']
        url = default_cfg['url']
    state_dict = load_state_dict_from_url(url=url, transpose=False)
    if filter_fn is not None:
        state_dict = filter_fn(state_dict)
    model.vars().assign(state_dict.values())
def get_act_fn(name='relu', **kwargs):
    """Look up an activation function by (case-insensitive) name.

    Extra keyword args are bound onto the returned callable via `partial`.
    """
    key = name.lower()
    assert key in _ACT_FN
    fn = _ACT_FN[key]
    return partial(fn, **kwargs) if kwargs else fn
def drop_path(x: JaxArray, drop_prob: float=0.0, generator=None) -> JaxArray:
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.

    Args:
        x: input batch; samples are dropped along the leading (batch) dimension.
        drop_prob: probability of zeroing an entire sample's path.
        generator: RNG object exposing `.key()`; defaults to `random.DEFAULT_GENERATOR`
            (resolved lazily via a None sentinel so callers see identical behavior).
    Returns:
        `x` with a random subset of samples zeroed and the survivors scaled by
        `1 / keep_prob` so the expected value is preserved.
    """
    if drop_prob == 0.0:
        return x
    if generator is None:
        generator = random.DEFAULT_GENERATOR
    keep_prob = 1 - drop_prob
    keep_shape = (x.shape[0], 1, 1, 1)
    # FIX: the previous version computed `keep_prob + bernoulli(...)` — a leftover
    # of the "uniform noise + floor" formulation, but without the floor — yielding
    # a non-binary mask in {keep_prob, 1 + keep_prob}. A Bernoulli(keep_prob) draw
    # already IS the binary keep mask, so use it directly.
    keep_mask = jr.bernoulli(generator.key(), p=keep_prob, shape=keep_shape)
    return (x / keep_prob) * keep_mask
class Conv2d(Module):
    """Applies a 2D convolution on a 4D-input batch of shape (N,C,H,W)."""

    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: Union[(Tuple[(int, int)], int)],
                 stride: Union[(Tuple[(int, int)], int)]=1,
                 padding: Union[(str, Tuple[(int, int)], int)]=0,
                 dilation: Union[(Tuple[(int, int)], int)]=1,
                 groups: int=1, bias: bool=False,
                 kernel_init: Callable=kaiming_normal, bias_init: Callable=jnp.zeros):
        """Creates a Conv2D module instance.

        Args:
            in_channels: number of channels of the input tensor.
            out_channels: number of channels of the output tensor.
            kernel_size: kernel size, tuple (height, width) or a single int for both.
            stride: convolution strides, tuple (stride_y, stride_x) or a single int.
            padding: explicit padding amount, a lax padding string, or 'LIKE' for
                torch-style padding computed from kernel/stride/dilation.
            dilation: spacing between kernel points (atrous convolution), tuple
                (dilation_y, dilation_x) or a single int.
            groups: number of input/output channel groups; when groups > 1 the
                convolution is applied per group. in_channels and out_channels
                must both be divisible by groups.
            bias: if True then convolution will have bias term.
            kernel_init: initializer for the (O, I/groups, H, W) kernel tensor.
            bias_init: initializer for the bias vector.
        """
        super().__init__()
        assert in_channels % groups == 0, 'in_chs should be divisible by groups'
        assert out_channels % groups == 0, 'out_chs should be divisible by groups'
        kernel_size = util.to_tuple(kernel_size, 2)
        self.weight = TrainVar(kernel_init((out_channels, in_channels // groups, *kernel_size)))
        self.bias = TrainVar(bias_init((out_channels,))) if bias else None
        self.strides = util.to_tuple(stride, 2)
        self.dilations = util.to_tuple(dilation, 2)
        if isinstance(padding, str):
            if padding == 'LIKE':
                pad_hw = (get_like_padding(kernel_size[0], self.strides[0], self.dilations[0]),
                          get_like_padding(kernel_size[1], self.strides[1], self.dilations[1]))
                padding = [pad_hw, pad_hw]
            # Any other string (e.g. 'SAME'/'VALID') is handed to lax untouched.
        else:
            pad_hw = util.to_tuple(padding, 2)
            padding = [pad_hw, pad_hw]
        self.padding = padding
        self.groups = groups

    def __call__(self, x: JaxArray) -> JaxArray:
        """Returns the results of applying the convolution to input x."""
        out = lax.conv_general_dilated(x, self.weight.value, self.strides, self.padding,
                                       rhs_dilation=self.dilations,
                                       feature_group_count=self.groups,
                                       dimension_numbers=('NCHW', 'OIHW', 'NCHW'))
        if self.bias:
            out += self.bias.value.reshape((1, -1, 1, 1))
        return out
class Linear(Module):
    """Applies a linear transformation on an input batch."""

    def __init__(self, in_features: int, out_features: int, bias: bool=True,
                 weight_init: Callable=xavier_normal, bias_init: Callable=jnp.zeros):
        """Creates a Linear module instance.

        Args:
            in_features: number of channels of the input tensor.
            out_features: number of channels of the output tensor.
            bias: if True then linear layer will have bias term.
            weight_init: weight initializer (takes an (out, in) shape, returns a 2D matrix).
            bias_init: initializer for the bias vector.
        """
        super().__init__()
        self.weight = TrainVar(weight_init((out_features, in_features)))
        self.bias = TrainVar(bias_init(out_features)) if bias else None

    def __call__(self, x: JaxArray) -> JaxArray:
        """Returns the results of applying the linear transformation to input x."""
        out = jnp.dot(x, self.weight.value.transpose())
        if self.bias:
            out += self.bias.value
        return out