Dataset columns:
  text          string, lengths 1 to 1.02k
  class_index   int64, 0 to 10.8k
  source        string, lengths 85 to 188
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py (class_index 3,619)

def __init__(
    self,
    do_resize: bool = True,
    size: Optional[Dict[str, int]] = None,
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    do_center_crop: bool = True,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = True,
    rescale_factor: Union[int, float] = 1 / 255,
    do_normalize: bool = True,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    **kwargs,
) -> None:
    super().__init__(**kwargs)
    size = size if size is not None else {"shortest_edge": 256}
    size = get_size_dict(size, default_to_square=False)
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size)
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
# Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
def resize(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """
    Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
    resized to keep the input aspect ratio.

    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`Dict[str, int]`):
            Size of the output image.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.
    """
    default_to_square = True
    if "shortest_edge" in size:
        size = size["shortest_edge"]
        default_to_square = False
    elif "height" in size and "width" in size:
        size = (size["height"], size["width"])
    else:
        raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")

    output_size = get_resize_output_image_size(
        image,
        size=size,
        default_to_square=default_to_square,
        input_data_format=input_data_format,
    )
    return resize(
        image,
        size=output_size,
        resample=resample,
        data_format=data_format,
        input_data_format=input_data_format,
        **kwargs,
    )
@filter_out_non_signature_kwargs()
def preprocess(
    self,
    images: ImageInput,
    do_resize: Optional[bool] = None,
    size: Dict[str, int] = None,
    resample: PILImageResampling = None,
    do_center_crop: bool = None,
    crop_size: Dict[str, int] = None,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
    """
    Preprocess an image or batch of images.

    Args:
        images (`ImageInput`):
            Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
            passing in images with pixel values between 0 and 1, set `do_rescale=False`.
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the image.
        size (`Dict[str, int]`, *optional*, defaults to `self.size`):
            Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio.
        resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
            `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
            an effect if `do_resize` is set to `True`.
        do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
            Whether to center crop the image.
        crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
            Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the image values between [0 - 1].
        rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
            Rescale factor to rescale the image by if `do_rescale` is set to `True`.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the image.
        image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
            Image mean to use if `do_normalize` is set to `True`.
        image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
            Image standard deviation to use if `do_normalize` is set to `True`.
        return_tensors (`str` or `TensorType`, *optional*):
            The type of tensors to return. Can be one of:
            - Unset: Return a list of `np.ndarray`.
            - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
            - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
            - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image. If unset, the channel dimension format is inferred
            from the input image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size)
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std

    images = make_list_of_images(images)

    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )
    validate_preprocess_arguments(
        do_rescale=do_rescale,
        rescale_factor=rescale_factor,
        do_normalize=do_normalize,
        image_mean=image_mean,
        image_std=image_std,
        do_center_crop=do_center_crop,
        crop_size=crop_size,
        do_resize=do_resize,
        size=size,
        resample=resample,
    )
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if do_rescale and is_scaled_image(images[0]):
        logger.warning_once(
            "It looks like you are trying to rescale already rescaled images. If the input"
            " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
        )

    if input_data_format is None:
        # We assume that all images have the same channel dimension format.
        input_data_format = infer_channel_dimension_format(images[0])

    all_images = []
    for image in images:
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

        if do_center_crop:
            image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_normalize:
            image = self.normalize(
                image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
            )

        all_images.append(image)
    images = [
        to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
        for image in all_images
    ]

    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
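A short usage sketch of the processor defined above. The class name `MobileNetV1ImageProcessor` matches the source file the snippet comes from, and the random test image is purely illustrative:

import numpy as np
from PIL import Image
from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor()  # defaults: resize shortest edge to 256, center-crop to 224x224

# A dummy 480x640 RGB image stands in for real data.
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224)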
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py (class_index 3,620)

class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[Union[bool, str]] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
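The `tf_padding` branch above sets `padding=0` on the convolution and defers padding to `apply_tf_padding`, so outputs match TensorFlow's asymmetric "SAME" rule rather than PyTorch's fixed symmetric padding. A minimal sketch of that rule for one spatial dimension (the helper name `same_pad_1d` and the example numbers are illustrative, not part of the library):

import math

def same_pad_1d(in_size: int, kernel: int, stride: int) -> tuple:
    # TensorFlow "SAME": pad just enough that out_size == ceil(in_size / stride).
    out_size = math.ceil(in_size / stride)
    pad_total = max((out_size - 1) * stride + kernel - in_size, 0)
    return pad_total // 2, pad_total - pad_total // 2  # (before, after)

print(same_pad_1d(224, 3, 2))  # (0, 1): asymmetric, which a single Conv2d `padding` int cannot express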
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py (class_index 3,621)

class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    _no_split_modules = []

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py (class_index 3,622)

class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
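To make the loop above concrete, here is a sketch of the channel schedule it produces with the config defaults (`depth_multiplier=1.0`, `min_depth=8`, both assumed here): the stem emits 32 channels, and `depth` doubles at layer 0 and at every stride-2 layer.

depth, depth_multiplier, min_depth = 32, 1.0, 8
strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

channels = [max(int(depth * depth_multiplier), min_depth)]  # conv_stem output
for i in range(13):
    if strides[i] == 2 or i == 0:
        depth *= 2
    channels.append(max(int(depth * depth_multiplier), min_depth))

print(channels)
# [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024]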
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
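A minimal forward-pass sketch through the model above with randomly initialized weights (the 224x224 input is illustrative; no checkpoint is downloaded). With five stride-2 convolutions in total (the stem plus four depthwise layers), a 224x224 input ends at a 7x7, 1024-channel feature map:

import torch
from transformers import MobileNetV1Config, MobileNetV1Model

model = MobileNetV1Model(MobileNetV1Config()).eval()

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 1024, 7, 7])
print(outputs.pooler_output.shape)      # torch.Size([1, 1024])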
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py (class_index 3,623)

class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
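A minimal sketch of the classifier head and the `problem_type` heuristic above, using a randomly initialized model (the `num_labels=10` config and random inputs are illustrative; no checkpoint is loaded):

import torch
from transformers import MobileNetV1Config, MobileNetV1ForImageClassification

config = MobileNetV1Config(num_labels=10)
model = MobileNetV1ForImageClassification(config).eval()

pixel_values = torch.randn(2, 3, 224, 224)
labels = torch.tensor([1, 4])  # long dtype -> the "single_label_classification" branch above

outputs = model(pixel_values, labels=labels)
print(outputs.logits.shape)  # torch.Size([2, 10])
print(outputs.loss)          # scalar cross-entropy loss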
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py (class_index 3,624)

class MobileNetV1Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MobileNetV1Model`]. It is used to instantiate a
    MobileNetV1 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MobileNetV1
    [google/mobilenet_v1_1.0_224](https://huggingface.co/google/mobilenet_v1_1.0_224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        depth_multiplier (`float`, *optional*, defaults to 1.0):
            Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
            channels. This is sometimes also called "alpha" or "width multiplier".
        min_depth (`int`, *optional*, defaults to 8):
            All layers will have at least this many channels.
        hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
            The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
        tf_padding (`bool`, *optional*, defaults to `True`):
            Whether to use TensorFlow padding rules on the convolution layers.
        classifier_dropout_prob (`float`, *optional*, defaults to 0.999):
            The dropout ratio for attached classifiers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 0.001):
            The epsilon used by the layer normalization layers.

    Example:

    ```python
    >>> from transformers import MobileNetV1Config, MobileNetV1Model

    >>> # Initializing a "mobilenet_v1_1.0_224" style configuration
    >>> configuration = MobileNetV1Config()

    >>> # Initializing a model from the "mobilenet_v1_1.0_224" style configuration
    >>> model = MobileNetV1Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py (class_index 3,625)

class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
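A small sketch of inspecting the axis maps this ONNX config declares. The import path is assumed from the source file listed above, and the constructor call relies on `OnnxConfig` accepting a config with a default task:

from transformers import MobileNetV1Config
from transformers.models.mobilenet_v1.configuration_mobilenet_v1 import MobileNetV1OnnxConfig

onnx_config = MobileNetV1OnnxConfig(MobileNetV1Config())
print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch'})])
print(onnx_config.outputs)              # last_hidden_state / pooler_output for the default task
print(onnx_config.atol_for_validation)  # 0.0001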
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,626)

class TFConvNextDropPath(keras.layers.Layer):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    References:
        (1) github.com:rwightman/pytorch-image-models
    """

    def __init__(self, drop_path: float, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x: tf.Tensor, training=None):
        if training:
            keep_prob = 1 - self.drop_path
            shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
            random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
            random_tensor = tf.floor(random_tensor)
            return (x / keep_prob) * random_tensor
        return x
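To see why the floor trick above implements per-sample stochastic depth, here is a plain NumPy sketch (the drop rate and tensor shape are illustrative): with keep probability p, `floor(p + U[0,1))` equals 1 with probability p and 0 otherwise, and dividing survivors by p keeps the expected activation unchanged.

import numpy as np

rng = np.random.default_rng(0)
drop_path = 0.2
keep_prob = 1 - drop_path

x = np.ones((8, 4))  # (batch, features)
# One Bernoulli(keep_prob) draw per sample, broadcast over the feature axis.
random_tensor = np.floor(keep_prob + rng.uniform(0, 1, size=(8, 1)))
out = (x / keep_prob) * random_tensor

print(out[:, 0])  # each entry is 0.0 (dropped) or 1.25 (= 1 / 0.8, rescaled survivor)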
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,627)

class TFConvNextEmbeddings(keras.layers.Layer):
    """This class is comparable to (and inspired by) the SwinEmbeddings class found in
    src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config: ConvNextConfig, **kwargs):
        super().__init__(**kwargs)
        self.patch_embeddings = keras.layers.Conv2D(
            filters=config.hidden_sizes[0],
            kernel_size=config.patch_size,
            strides=config.patch_size,
            name="patch_embeddings",
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer=keras.initializers.Zeros(),
        )
        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
        self.num_channels = config.num_channels
        self.config = config

    def call(self, pixel_values):
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values["pixel_values"]

        tf.debugging.assert_equal(
            shape_list(pixel_values)[1],
            self.num_channels,
            message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
        )

        # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))

        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "patch_embeddings", None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build([None, None, None, self.config.num_channels])
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,628)

class TFConvNextLayer(keras.layers.Layer):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in
    (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
    NHWC ordering, we can just apply the operations straight-away without the permutation.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """
    def __init__(self, config, dim, drop_path=0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        self.dwconv = keras.layers.Conv2D(
            filters=dim,
            kernel_size=7,
            padding="same",
            groups=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="dwconv",
        )  # depthwise conv
        self.layernorm = keras.layers.LayerNormalization(
            epsilon=1e-6,
            name="layernorm",
        )
        self.pwconv1 = keras.layers.Dense(
            units=4 * dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv1",
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = get_tf_activation(config.hidden_act)
        self.pwconv2 = keras.layers.Dense(
            units=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv2",
        )
        # Using `layers.Activation` instead of `tf.identity` to better control `training`
        # behaviour.
        self.drop_path = (
            TFConvNextDropPath(drop_path, name="drop_path")
            if drop_path > 0.0
            else keras.layers.Activation("linear", name="drop_path")
        )

    def build(self, input_shape: tf.TensorShape = None):
        # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
        self.layer_scale_parameter = (
            self.add_weight(
                shape=(self.dim,),
                initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
                trainable=True,
                name="layer_scale_parameter",
            )
            if self.config.layer_scale_init_value > 0
            else None
        )

        if self.built:
            return
        self.built = True
        if getattr(self, "dwconv", None) is not None:
            with tf.name_scope(self.dwconv.name):
                self.dwconv.build([None, None, None, self.dim])
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.dim])
        if getattr(self, "pwconv1", None) is not None:
            with tf.name_scope(self.pwconv1.name):
                self.pwconv1.build([None, None, self.dim])
        if getattr(self, "pwconv2", None) is not None:
            with tf.name_scope(self.pwconv2.name):
                self.pwconv2.build([None, None, 4 * self.dim])
        if getattr(self, "drop_path", None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(self, hidden_states, training=False):
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)

        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x

        x = input + self.drop_path(x, training=training)
        return x
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,629)

class TFConvNextStage(keras.layers.Layer):
    """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config (`ConvNextConfig`):
            Model configuration class.
        in_channels (`int`):
            Number of input channels.
        out_channels (`int`):
            Number of output channels.
        depth (`int`):
            Number of residual blocks.
        drop_path_rates (`List[float]`):
            Stochastic depth rates for each layer.
    """
    def __init__(
        self,
        config: ConvNextConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 2,
        stride: int = 2,
        depth: int = 2,
        drop_path_rates: Optional[List[float]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = [
                keras.layers.LayerNormalization(
                    epsilon=1e-6,
                    name="downsampling_layer.0",
                ),
                # Inputs to this layer will follow NHWC format since we
                # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
                # layer. All the outputs throughout the model will be in NHWC
                # from this point on until the output where we again change to
                # NCHW.
                keras.layers.Conv2D(
                    filters=out_channels,
                    kernel_size=kernel_size,
                    strides=stride,
                    kernel_initializer=get_initializer(config.initializer_range),
                    bias_initializer=keras.initializers.Zeros(),
                    name="downsampling_layer.1",
                ),
            ]
        else:
            self.downsampling_layer = [tf.identity]
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = [
            TFConvNextLayer(
                config,
                dim=out_channels,
                drop_path=drop_path_rates[j],
                name=f"layers.{j}",
            )
            for j in range(depth)
        ]
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride

    def call(self, hidden_states):
        for layer in self.downsampling_layer:
            hidden_states = layer(hidden_states)
        for layer in self.layers:
            hidden_states = layer(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
        if self.in_channels != self.out_channels or self.stride > 1:
            with tf.name_scope(self.downsampling_layer[0].name):
                self.downsampling_layer[0].build([None, None, None, self.in_channels])
            with tf.name_scope(self.downsampling_layer[1].name):
                self.downsampling_layer[1].build([None, None, None, self.in_channels])
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,630)

class TFConvNextEncoder(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
        drop_path_rates = tf.split(drop_path_rates, config.depths)
        drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = TFConvNextStage(
                config,
                in_channels=prev_chs,
                out_channels=out_chs,
                stride=2 if i > 0 else 1,
                depth=config.depths[i],
                drop_path_rates=drop_path_rates[i],
                name=f"stages.{i}",
            )
            self.stages.append(stage)
            prev_chs = out_chs

    def call(self, hidden_states, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            hidden_states = layer_module(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)

    def build(self, input_shape=None):
        for stage in self.stages:
            with tf.name_scope(stage.name):
                stage.build(None)
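The encoder above spreads a single `drop_path_rate` linearly over every block, then slices the schedule per stage. A NumPy sketch of that bookkeeping (the `drop_path_rate=0.1` and `depths=[3, 3, 9, 3]` values are illustrative convnext-tiny-style settings, assumed here):

import numpy as np

depths = [3, 3, 9, 3]
drop_path_rate = 0.1

rates = np.linspace(0.0, drop_path_rate, sum(depths))
per_stage = np.split(rates, np.cumsum(depths)[:-1])  # same slicing as tf.split above

for i, stage_rates in enumerate(per_stage):
    print(f"stage {i}: {np.round(stage_rates, 3).tolist()}")
# Later blocks get higher drop rates; the first block starts at 0.0.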
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,631)

class TFConvNextMainLayer(keras.layers.Layer):
    config_class = ConvNextConfig

    def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embeddings = TFConvNextEmbeddings(config, name="embeddings")
        self.encoder = TFConvNextEncoder(config, name="encoder")
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
        # We are setting the `data_format` like so because from here on we will revert to the
        # NCHW output format
        self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None

    @unpack_inputs
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        last_hidden_state = encoder_outputs[0]
        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = self.layernorm(self.pooler(last_hidden_state))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            hidden_states = hidden_states if output_hidden_states else ()
            return (last_hidden_state, pooled_output) + hidden_states

        return TFBaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, self.config.hidden_sizes[-1]])
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,632)

class TFConvNextPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvNextConfig
    base_model_prefix = "convnext"
    main_input_name = "pixel_values"
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,633)

class TFConvNextModel(TFConvNextPreTrainedModel):
    def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, TFConvNextModel
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        outputs = self.convnext(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPooling(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convnext", None) is not None:
            with tf.name_scope(self.convnext.name):
                self.convnext.build(None)
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py (class_index 3,634)

class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels
        self.convnext = TFConvNextMainLayer(config, name="convnext")

        # Classifier head
        self.classifier = keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="classifier",
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification
        >>> import tensorflow as tf
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits

        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
        >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        outputs = self.convnext(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convnext", None) is not None:
            with tf.name_scope(self.convnext.name):
                self.convnext.build(None)
        if getattr(self, "classifier", None) is not None:
            if hasattr(self.classifier, "name"):
                with tf.name_scope(self.classifier.name):
                    self.classifier.build([None, None, self.config.hidden_sizes[-1]])
source: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py (class_index 3,635)

class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Constructs a ConvNeXT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
            overridden by `do_resize` in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 384}`):
            Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
            resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
            be matched to `int(size["shortest_edge"] / crop_pct)`, after which the image is cropped to
            `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
            be overridden by `size` in the `preprocess` method.
        crop_pct (`float`, *optional*, defaults to 224 / 256):
            Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be
            overridden by `crop_pct` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
    """
model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 384} size = get_size_dict(size, default_to_square=False)
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
self.do_resize = do_resize self.size = size # Default value set here for backwards compatibility where the value in config is None self.crop_pct = crop_pct if crop_pct is not None else 224 / 256 self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image.
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. crop_pct (`float`): Percentage of the image to crop. Only has an effect if size < 384. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*):
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" not in size: raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}") shortest_edge = size["shortest_edge"]
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct resize_shortest_edge = int(shortest_edge / crop_pct) resize_size = get_resize_output_image_size( image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format ) image = resize( image=image, size=resize_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # then crop to (shortest_edge, shortest_edge) return center_crop( image=image, size=(shortest_edge, shortest_edge), data_format=data_format, input_data_format=input_data_format, **kwargs, ) else: # warping (no cropping) when evaluated at 384 or larger
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
return resize( image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, )
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
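A worked instance of the `shortest_edge < 384` branch above, with illustrative numbers (plain arithmetic, nothing assumed beyond the code shown): with `size={"shortest_edge": 224}` and the default `crop_pct = 224 / 256`, the shorter side is first resized to 256 and the image is then center-cropped to 224x224, the classic ImageNet evaluation recipe. At 384 or larger, the image is instead warped straight to the square target with no crop.

shortest_edge = 224
crop_pct = 224 / 256
resize_shortest_edge = int(shortest_edge / crop_pct)
print(resize_shortest_edge)  # 256 -> resize shorter side to 256, then center-crop to 224x224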
@filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images.
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
Percentage of the image to crop if size < 384. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation.
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
- Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize crop_pct = crop_pct if crop_pct is not None else self.crop_pct resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_resize:
            images = [
                self.resize(
                    image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
                )
                for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
3,635
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
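As a usage sketch of the preprocessing pipeline above (the checkpoint name follows the ConvNeXT convention referenced later in this file; verify its availability before relying on it):

```python
import numpy as np
from transformers import ConvNextImageProcessor

# Checkpoint name assumed from the config docstring below; verify availability.
processor = ConvNextImageProcessor.from_pretrained("facebook/convnext-tiny-224")

# A dummy uint8 image in (height, width, num_channels) layout.
image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)

# The processor resizes, crops, rescales and normalizes, then batches.
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])
```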
class ConvNextFeatureExtractor(ConvNextImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ConvNextImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
3,636
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/feature_extraction_convnext.py
class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate a
    ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ConvNeXT
    [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
3,637
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, *optional*, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, *optional*, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`List[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
3,637
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
            The initial value for the layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
3,637
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined
            in the `stage_names` attribute.
3,637
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
    Example:
    ```python
    >>> from transformers import ConvNextConfig, ConvNextModel

    >>> # Initializing a ConvNext convnext-tiny-224 style configuration
    >>> configuration = ConvNextConfig()

    >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
    >>> model = ConvNextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "convnext"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        layer_scale_init_value=1e-6,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
3,637
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
3,637
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
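The `out_features`/`out_indices` alignment performed at the end of `__init__` can be checked directly. A small sketch, assuming the default four-stage depths (the exact container type of `out_indices` may vary by library version):

```python
from transformers import ConvNextConfig

# stage_names defaults to ["stem", "stage1", "stage2", "stage3", "stage4"].
config = ConvNextConfig(out_features=["stage2", "stage4"])
print(config.out_features)  # ['stage2', 'stage4']
# Indices into stage_names, aligned automatically; may print as a list
# or tuple depending on the transformers version.
print(config.out_indices)   # e.g. [2, 4]
```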
class ConvNextOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
3,638
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
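The two properties above drive ONNX export: `inputs` declares the input names with their dynamic axes, and `atol_for_validation` bounds the numeric drift tolerated when comparing exported outputs against the PyTorch model. A quick inspection sketch, assuming `ConvNextOnnxConfig` from the module above is in scope:

```python
from transformers import ConvNextConfig

# ConvNextOnnxConfig is assumed in scope from the module above.
onnx_config = ConvNextOnnxConfig(ConvNextConfig())
print(onnx_config.inputs)
# OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
print(onnx_config.atol_for_validation)  # 1e-05
```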
class ConvNextDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
3,639
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
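`ConvNextDropPath` delegates to a `drop_path` helper defined elsewhere in modeling_convnext.py. A minimal sketch of stochastic depth, written independently here (the library's helper may differ in detail):

```python
import torch


def drop_path_sketch(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Zero out whole samples with probability drop_prob, rescaling survivors."""
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(shape).bernoulli_(keep_prob)
    # Divide by keep_prob so the expected activation magnitude is unchanged.
    return x * mask / keep_prob
```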
class ConvNextLayerNorm(nn.Module):
    r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
    width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError(f"Unsupported data format: {self.data_format}")
        self.normalized_shape = (normalized_shape,)
3,640
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.data_format == "channels_last":
            x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            input_dtype = x.dtype
            x = x.float()
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = x.to(dtype=input_dtype)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x
3,640
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
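A quick way to sanity-check the hand-rolled channels_first branch is to compare it against the channels_last path on permuted input. A sketch, assuming `ConvNextLayerNorm` from the module above is in scope:

```python
import torch

# ConvNextLayerNorm is assumed in scope from the module above.
x = torch.randn(2, 96, 8, 8)  # (batch, channels, height, width)

ln_first = ConvNextLayerNorm(96, data_format="channels_first")
ln_last = ConvNextLayerNorm(96, data_format="channels_last")

out_first = ln_first(x)
# Permute to channels_last, normalize over the last dim, permute back.
out_last = ln_last(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
print(torch.allclose(out_first, out_last, atol=1e-5))  # True, up to float error
```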
class ConvNextEmbeddings(nn.Module):
    """This class is comparable to (and inspired by) the SwinEmbeddings class
    found in src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config):
        super().__init__()
        self.patch_embeddings = nn.Conv2d(
            config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
        )
        self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
        self.num_channels = config.num_channels

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings
3,641
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
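With the default config (patch_size=4, hidden_sizes[0]=96), the non-overlapping stride-4 conv turns a 224x224 image into a 56x56 grid of 96-dim features. A shape sketch, assuming the classes above are in scope:

```python
import torch
from transformers import ConvNextConfig

# ConvNextEmbeddings is assumed in scope from the module above.
config = ConvNextConfig()  # defaults: num_channels=3, patch_size=4, hidden_sizes[0]=96
embeddings = ConvNextEmbeddings(config)

pixel_values = torch.randn(1, 3, 224, 224)
print(embeddings(pixel_values).shape)  # torch.Size([1, 96, 56, 56]) -- 224 / 4 = 56
```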
class ConvNextLayer(nn.Module):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all
    in (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute
    back.

    The authors used (2) as they find it slightly faster in PyTorch.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """
3,642
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
    def __init__(self, config, dim, drop_path=0):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
        self.layernorm = ConvNextLayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise/1x1 convs, implemented with linear layers
        self.act = ACT2FN[config.hidden_act]
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.layer_scale_parameter = (
            nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
            if config.layer_scale_init_value > 0
            else None
        )
        self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
3,642
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)

        x = input + self.drop_path(x)
        return x
3,642
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
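The block is shape-preserving: the depthwise 7x7 conv pads by 3, and the pointwise layers only widen the channel dim internally (dim -> 4*dim -> dim) before the residual add. A sketch, assuming the classes above are in scope:

```python
import torch
from transformers import ConvNextConfig

# ConvNextLayer is assumed in scope from the module above.
config = ConvNextConfig()
layer = ConvNextLayer(config, dim=96, drop_path=0.0)

hidden_states = torch.randn(1, 96, 56, 56)
print(layer(hidden_states).shape)  # torch.Size([1, 96, 56, 56]) -- same as the input
```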
class ConvNextStage(nn.Module):
    """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates (`List[float]`): Stochastic depth rates for each layer.
    """

    def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
        super().__init__()
3,643
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = nn.Sequential(
                ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
                nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
            )
        else:
            self.downsampling_layer = nn.Identity()
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = nn.Sequential(
            *[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
        )

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.downsampling_layer(hidden_states)
        hidden_states = self.layers(hidden_states)
        return hidden_states
3,643
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
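Each non-trivial stage halves the spatial resolution through the stride-2 downsampling conv before running its residual blocks. A sketch, assuming the classes above are in scope:

```python
import torch
from transformers import ConvNextConfig

# ConvNextStage is assumed in scope from the module above.
config = ConvNextConfig()
stage = ConvNextStage(config, in_channels=96, out_channels=192, depth=3)

hidden_states = torch.randn(1, 96, 56, 56)
# Downsampled by the stride-2 conv, channels widened by the channel change.
print(stage(hidden_states).shape)  # torch.Size([1, 192, 28, 28])
```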