Dataset columns: text (string, lengths 1 to 1.02k), class_index (int64, values 0 to 10.8k), source (string, lengths 85 to 188).
Each row below is listed as its text cell (a code chunk), then its class_index, then its source path.
# flatten to a single sequence
patch_embeds = torch.cat([p.flatten(1).T for p in patch_embeds_list], dim=0).unsqueeze(0)
patch_embeds = self.ln_pre(patch_embeds)

# positional embeddings
position_ids = position_ids_in_meshgrid(
    patch_embeds_list, max_width=self.config.image_size // self.config.patch_size
).to(self.device)
position_embedding = self.patch_positional_embedding(patch_embeds, position_ids)

attention_mask = generate_block_attention_mask(
    [p.shape[-2] * p.shape[-1] for p in patch_embeds_list], patch_embeds
)
return self.transformer(patch_embeds, attention_mask, position_embedding)
2,940
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py
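`generate_block_attention_mask` (referenced above but not included in this excerpt) makes the flattened sequence behave like independent per-image sequences: patches may only attend within their own image's block. An illustrative block-diagonal mask sketch, not the library implementation:

import torch

def block_attention_mask(block_sizes, dtype=torch.float32):
    """Build a block-diagonal attention bias: 0 inside each image's block,
    a large negative value across image boundaries."""
    total = sum(block_sizes)
    mask = torch.full((total, total), torch.finfo(dtype).min, dtype=dtype)
    start = 0
    for size in block_sizes:
        mask[start : start + size, start : start + size] = 0.0
        start += size
    return mask

# Two images contributing 4 and 2 patch tokens respectively:
print(block_attention_mask([4, 2]).shape)  # torch.Size([6, 6])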
class BatchMixFeature(BatchFeature):
    def to(self, *args, **kwargs) -> "BatchMixFeature":
        """
        Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
        different `dtypes` and sending the `BatchFeature` to a different `device`.

        Args:
            args (`Tuple`):
                Will be passed to the `to(...)` function of the tensors.
            kwargs (`Dict`, *optional*):
                Will be passed to the `to(...)` function of the tensors.

        Returns:
            [`BatchFeature`]: The same instance after modification.
        """
2,941
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        def _recursive_to(obj, device, *args, **kwargs):
            # Lists can be nested, so keep digging until we hit tensors
            if isinstance(obj, list):
                return [_recursive_to(o, device, *args, **kwargs) for o in obj]
            # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
            elif isinstance(obj, torch.Tensor) and torch.is_floating_point(obj):
                # cast and send to device
                return obj.to(*args, **kwargs)
            elif isinstance(obj, torch.Tensor) and device is not None:
                # only send to device, don't cast
                return obj.to(device=device)
            else:
                return obj

        requires_backends(self, ["torch"])
        import torch  # noqa
2,941
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        device = kwargs.get("device")
        # Check if the args are a device or a dtype
        if device is None and len(args) > 0:
            # device should be always the first argument
            arg = args[0]
            if is_torch_dtype(arg):
                # The first argument is a dtype
                pass
            elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
                device = arg
            else:
                # it's something else
                raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")

        self.data = {k: _recursive_to(v, device, *args, **kwargs) for k, v in self.data.items()}
        return self
2,941
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
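A toy illustration of why the recursion is needed: `pixel_values` is a nested list of variable-size tensors, so `.to()` must walk the lists rather than call a single tensor op. This is a simplified re-implementation for demonstration only, not the class above:

import torch

def recursive_to(obj, **kwargs):
    if isinstance(obj, list):
        return [recursive_to(o, **kwargs) for o in obj]
    if isinstance(obj, torch.Tensor) and torch.is_floating_point(obj):
        return obj.to(**kwargs)
    return obj

# nested structure: samples -> images of different sizes
nested = [[torch.rand(3, 4, 4), torch.rand(3, 8, 8)], [torch.rand(3, 16, 16)]]
halved = recursive_to(nested, dtype=torch.float16)
print(halved[0][1].dtype)  # torch.float16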
class PixtralImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Pixtral image processor.
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"longest_edge": 1024}`):
            Size of the maximum dimension of either the height or width dimension of the image. Used to control how
            images are resized. If either the height or width is greater than `size["longest_edge"]`, then both the
            height and width are rescaled as `height / ratio`, `width / ratio`, where
            `ratio = max(height / longest_edge, width / longest_edge)`.
        patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
            Size of the patches in the model, used to calculate the output image size. Can be overridden by
            `patch_size` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
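The longest_edge rule in the docstring can be checked with a few lines of arithmetic. The helper below is hypothetical and follows the documented formula, flooring after division and snapping up to whole 16x16 patches; the library's own rounding may differ slightly:

import math

def pixtral_output_size(height, width, longest_edge=1024, patch=16):
    # documented rule: rescale both sides when the larger one exceeds longest_edge
    ratio = max(height / longest_edge, width / longest_edge)
    if ratio > 1:
        height = math.floor(height / ratio)  # assumption: floor after division
        width = math.floor(width / ratio)
    # snap up to a whole number of patches
    height = math.ceil(height / patch) * patch
    width = math.ceil(width / patch) * patch
    return height, width

print(pixtral_output_size(2048, 1536))  # (1024, 768)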
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        patch_size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"longest_edge": 1024}
        patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        patch_size = get_size_dict(patch_size, default_to_square=True)
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        self.do_resize = do_resize
        self.size = size
        self.patch_size = patch_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
        self.do_convert_rgb = do_convert_rgb
        self._valid_processor_keys = [
            "images",
            "do_resize",
            "size",
            "patch_size",
            "resample",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        patch_size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The longest edge of the image is resized to `size["longest_edge"]`, with the other edge
        resized to keep the input aspect ratio.
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dict containing the longest possible edge of the image.
            patch_size (`Dict[str, int]`):
                Patch size used to calculate the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        if "longest_edge" in size:
            size = (size["longest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
            size = (size["height"], size["width"])
        else:
            raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.")
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
if "height" in patch_size and "width" in patch_size: patch_size = (patch_size["height"], patch_size["width"]) else: raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size( image, size=size, patch_size=patch_size, input_data_format=input_data_format, ) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, )
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        patch_size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> "BatchMixFeature":
        """
        Preprocess an image or batch of images.
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Describes the maximum input dimensions to the model.
            patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
                Patch size in the model. Used to calculate the image size after resizing.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        patch_size = patch_size if patch_size is not None else self.patch_size
        patch_size = get_size_dict(patch_size, default_to_square=True)
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        images_list = make_list_of_images(images)
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        if not valid_images(images_list[0]):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        if do_convert_rgb:
            images_list = [[convert_to_rgb(image) for image in images] for images in images_list]

        # All transformations expect numpy arrays.
        images_list = [[to_numpy_array(image) for image in images] for images in images_list]
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        if do_rescale and is_scaled_image(images_list[0][0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images_list[0][0])
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
        batch_images = []
        batch_image_sizes = []
        for sample_images in images_list:
            images = []
            image_sizes = []
            for image in sample_images:
                if do_resize:
                    image = self.resize(
                        image=image,
                        size=size,
                        patch_size=patch_size,
                        resample=resample,
                        input_data_format=input_data_format,
                    )
                if do_rescale:
                    image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                if do_normalize:
                    image = self.normalize(
                        image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                    )
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
                images.append(image)
                image_sizes.append(get_image_size(image, input_data_format))
            batch_images.append(images)
            batch_image_sizes.append(image_sizes)

        images_list = [
            [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
            for images in batch_images
        ]

        # Convert to tensor type outside of BatchFeature to avoid batching the images of different sizes
        images_list = [[convert_to_tensor(image, return_tensors) for image in images] for images in images_list]
        return BatchMixFeature(data={"pixel_values": images_list, "image_sizes": batch_image_sizes}, tensor_type=None)
2,942
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral.py
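A short usage sketch of the slow processor end to end. The random image is illustrative; `PixtralImageProcessor` is exported at the top level of transformers, and the exact nesting of `pixel_values` (samples, then images) is what `BatchMixFeature` exists to handle:

import numpy as np
from transformers import PixtralImageProcessor

processor = PixtralImageProcessor()  # defaults: longest_edge=1024, 16x16 patches
image = np.random.randint(0, 256, (300, 500, 3), dtype=np.uint8)

out = processor.preprocess(image, return_tensors="pt")
print(out["image_sizes"])               # resized (height, width) per image
# pixel_values is a nested list (samples -> images), not one batched tensor
print(out["pixel_values"][0][0].shape)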
class PixtralImageProcessorFast(BaseImageProcessorFast):
    r"""
    Constructs a fast Pixtral image processor that leverages torchvision.
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"longest_edge": 1024}`):
            Size of the maximum dimension of either the height or width dimension of the image. Used to control how
            images are resized. If either the height or width is greater than `size["longest_edge"]`, then both the
            height and width are rescaled as `height / ratio`, `width / ratio`, where
            `ratio = max(height / longest_edge, width / longest_edge)`.
        patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
            Size of the patches in the model, used to calculate the output image size. Can be overridden by
            `patch_size` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        patch_size: Dict[str, int] = None,
        resample: Union[PILImageResampling, "F.InterpolationMode"] = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"longest_edge": 1024}
        patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        patch_size = get_size_dict(patch_size, default_to_square=True)
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
        self.do_resize = do_resize
        self.size = size
        self.patch_size = patch_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
        self.do_convert_rgb = do_convert_rgb
        self._valid_processor_keys = [
            "images",
            "do_resize",
            "size",
            "patch_size",
            "resample",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
    def resize(
        self,
        image: torch.Tensor,
        size: Dict[str, int],
        patch_size: Dict[str, int],
        interpolation: "F.InterpolationMode" = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Resize an image. The longest edge of the image is resized to `size["longest_edge"]`, with the other edge
        resized to keep the input aspect ratio.
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`Dict[str, int]`):
                Dict containing the longest possible edge of the image.
            patch_size (`Dict[str, int]`):
                Patch size used to calculate the size of the output image.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                Resampling filter to use when resizing the image.
        """
        interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
        if "longest_edge" in size:
            size = (size["longest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.")
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
if "height" in patch_size and "width" in patch_size: patch_size = (patch_size["height"], patch_size["width"]) else: raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size( image, size=size, patch_size=patch_size, ) return F.resize( image, size=output_size, interpolation=interpolation, **kwargs, )
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        patch_size: Dict[str, int] = None,
        resample: Optional[Union[PILImageResampling, "F.InterpolationMode"]] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> BatchMixFeature:
        """
        Preprocess an image or batch of images.
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Describes the maximum input dimensions to the model.
            patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
                Patch size in the model. Used to calculate the image size after resizing.
            resample (`PILImageResampling` or `InterpolationMode`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        patch_size = patch_size if patch_size is not None else self.patch_size
        patch_size = get_size_dict(patch_size, default_to_square=True)
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        device = kwargs.pop("device", None)

        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        images_list = make_list_of_images(images)
        image_type = get_image_type(images_list[0][0])
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
        if image_type not in [ImageType.PIL, ImageType.TORCH, ImageType.NUMPY]:
            raise ValueError(f"Unsupported input image type {image_type}")

        validate_fast_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
            return_tensors=return_tensors,
            data_format=data_format,
        )

        if do_convert_rgb:
            images_list = [[convert_to_rgb(image) for image in images] for images in images_list]
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
        if image_type == ImageType.PIL:
            images_list = [[F.pil_to_tensor(image) for image in images] for images in images_list]
        elif image_type == ImageType.NUMPY:
            # not using F.to_tensor as it doesn't handle (C, H, W) numpy arrays
            images_list = [[torch.from_numpy(image).contiguous() for image in images] for images in images_list]

        if device is not None:
            images_list = [[image.to(device) for image in images] for images in images_list]

        # We assume that all images have the same channel dimension format.
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images_list[0][0])
        if input_data_format == ChannelDimension.LAST:
            images_list = [[image.permute(2, 0, 1).contiguous() for image in images] for images in images_list]
            input_data_format = ChannelDimension.FIRST
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
        if do_rescale and do_normalize:
            # fused rescale and normalize
            new_mean = torch.tensor(image_mean, device=images_list[0][0].device) * (1.0 / rescale_factor)
            new_std = torch.tensor(image_std, device=images_list[0][0].device) * (1.0 / rescale_factor)

        batch_images = []
        batch_image_sizes = []
        for sample_images in images_list:
            images = []
            image_sizes = []
            for image in sample_images:
                if do_resize:
                    interpolation = (
                        pil_torch_interpolation_mapping[resample]
                        if isinstance(resample, (PILImageResampling, int))
                        else resample
                    )
                    image = self.resize(
                        image=image,
                        size=size,
                        patch_size=patch_size,
                        interpolation=interpolation,
                    )
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
                if do_rescale and do_normalize:
                    # fused rescale and normalize
                    image = F.normalize(image.to(dtype=torch.float32), new_mean, new_std)
                elif do_rescale:
                    image = image * rescale_factor
                elif do_normalize:
                    image = F.normalize(image, image_mean, image_std)

                images.append(image)
                image_sizes.append(get_image_size(image, input_data_format))
            batch_images.append(images)
            batch_image_sizes.append(image_sizes)

        return BatchMixFeature(
            data={"pixel_values": batch_images, "image_sizes": batch_image_sizes},
            tensor_type=None,
        )
2,943
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py
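The fused branch above works because (x * s - mean) / std == (x - mean / s) / (std / s) with s = rescale_factor, so a single normalize call with the scaled statistics performs both the 1/255 rescale and the normalization. A standalone numerical check of that identity (plain torch, not the processor itself):

import torch

x = torch.randint(0, 256, (3, 4, 4)).float()
mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).view(3, 1, 1)
std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).view(3, 1, 1)
s = 1 / 255

two_step = ((x * s) - mean) / std          # rescale, then normalize
fused = (x - mean / s) / (std / s)         # one pass with scaled statistics
print(torch.allclose(two_step, fused, atol=1e-5))  # True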
class GLPNConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate a GLPN
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the GLPN
    [vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
2,944
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/configuration_glpn.py
    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        num_encoder_blocks (`int`, *optional*, defaults to 4):
            The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
        depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
            The number of layers in each encoder block.
        sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
            Sequence reduction ratios in each encoder block.
        hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
            Dimension of each of the encoder blocks.
        patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
            Patch size before each encoder block.
        strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
            Stride before each encoder block.
        num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
2,944
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/configuration_glpn.py
            Number of attention heads for each attention layer in each block of the Transformer encoder.
        mlp_ratios (`List[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
            Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
            encoder blocks.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
2,944
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/configuration_glpn.py
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        decoder_hidden_size (`int`, *optional*, defaults to 64):
            The dimension of the decoder.
        max_depth (`int`, *optional*, defaults to 10):
            The maximum depth of the decoder.
        head_in_index (`int`, *optional*, defaults to -1):
            The index of the features to use in the head.
2,944
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/configuration_glpn.py
    Example:

    ```python
    >>> from transformers import GLPNModel, GLPNConfig

    >>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
    >>> configuration = GLPNConfig()

    >>> # Initializing a model from the vinvino02/glpn-kitti style configuration
    >>> model = GLPNModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glpn"
2,944
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/configuration_glpn.py
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
2,944
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/configuration_glpn.py
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
2,944
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/configuration_glpn.py
class GLPNImageProcessor(BaseImageProcessor):
    r"""
    Constructs a GLPN image processor.
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions, rounding them down to the closest multiple of
            `size_divisor`. Can be overridden by `do_resize` in `preprocess`.
        size_divisor (`int`, *optional*, defaults to 32):
            When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
            multiple of `size_divisor`. Can be overridden by `size_divisor` in `preprocess`.
        resample (`PIL.Image` resampling filter, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Can be
            overridden by `do_rescale` in `preprocess`.
    """

    model_input_names = ["pixel_values"]
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.

        If the image is of dimension (3, 260, 170) and size_divisor is 32, the image will be resized to (3, 256, 160).
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
        Args:
            image (`np.ndarray`):
                The image to resize.
            size_divisor (`int`):
                The image is resized so its height and width are rounded down to the closest multiple of
                `size_divisor`.
            resample:
                `PIL.Image` resampling filter to use when resizing the image, e.g. `PILImageResampling.BILINEAR`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If `None`, the channel dimension format of the
                input image is used. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not set, the channel dimension format is inferred
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
        Returns:
            `np.ndarray`: The resized image.
        """
        height, width = get_image_size(image, channel_dim=input_data_format)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(
            image,
            (new_h, new_w),
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
        return image
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
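The rounding rule is plain integer arithmetic; a two-line check reproduces the docstring example (260, 170) -> (256, 160) with size_divisor=32 (the helper name is mine):

def round_down(height, width, size_divisor=32):
    # floor each dimension to the nearest multiple of size_divisor
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

print(round_down(260, 170))  # (256, 160), matching the docstring example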
    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> BatchFeature:
        """
        Preprocess the given images.
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
        Args:
            images (`PIL.Image.Image` or `TensorType` or `List[np.ndarray]` or `List[TensorType]`):
                Images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the input such that the (height, width) dimensions are a multiple of `size_divisor`.
            size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
                When `do_resize` is `True`, images are resized so their height and width are rounded down to the
                closest multiple of `size_divisor`.
            resample (`PIL.Image` resampling filter, *optional*, defaults to `self.resample`):
                `PIL.Image` resampling filter to use if resizing the image, e.g. `PILImageResampling.BILINEAR`. Only has
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
                an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - `None`: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
        resample = resample if resample is not None else self.resample
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Here, the rescale() method uses a constant rescale_factor. It does not need to be validated
        # with a rescale_factor.
        validate_preprocess_arguments(
            do_resize=do_resize,
            size=size_divisor,  # Here, size_divisor is used as a parameter for optimal resizing instead of size.
            resample=resample,
        )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_resize:
            images = [
                self.resize(image, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format)
                for image in images
            ]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255, input_data_format=input_data_format) for image in images]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_depth_estimation(
        self,
        outputs: "DepthEstimatorOutput",
        target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]] = None,
    ) -> List[Dict[str, TensorType]]:
        """
        Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
        Only supports PyTorch.

        Args:
            outputs ([`DepthEstimatorOutput`]):
                Raw outputs of the model.
            target_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                (height, width) of each image in the batch. If left to None, predictions will not be resized.
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
        Returns:
            `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
            predictions.
        """
        requires_backends(self, "torch")

        predicted_depth = outputs.predicted_depth

        if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
            )

        results = []
        target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
        for depth, target_size in zip(predicted_depth, target_sizes):
            if target_size is not None:
                depth = depth[None, None, ...]
                depth = torch.nn.functional.interpolate(depth, size=target_size, mode="bicubic", align_corners=False)
                depth = depth.squeeze()

            results.append({"predicted_depth": depth})

        return results
2,945
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/image_processing_glpn.py
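A hedged end-to-end usage sketch for the post-processing above, reusing the vinvino02/glpn-kitti checkpoint named in the GLPNConfig docstring; the blank test image is illustrative:

import torch
from PIL import Image
from transformers import GLPNForDepthEstimation, GLPNImageProcessor

ckpt = "vinvino02/glpn-kitti"  # checkpoint from the GLPNConfig docstring
processor = GLPNImageProcessor.from_pretrained(ckpt)
model = GLPNForDepthEstimation.from_pretrained(ckpt)

image = Image.new("RGB", (640, 480))
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# resize predictions back to the original (height, width)
post = processor.post_process_depth_estimation(outputs, target_sizes=[(480, 640)])
print(post[0]["predicted_depth"].shape)  # torch.Size([480, 640])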
class GLPNDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
2,946
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
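The `drop_path` function that `GLPNDropPath` wraps is not included in this excerpt; the usual stochastic-depth formulation it refers to looks like this sketch (one Bernoulli keep/drop decision per sample, with rescaling by the keep probability):

import torch

def drop_path(hidden_states, drop_prob=0.0, training=False):
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1.0 - drop_prob
    # one random draw per sample, broadcast over the remaining dims
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize to 0 or 1
    return hidden_states.div(keep_prob) * random_tensor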
class GLPNOverlapPatchEmbeddings(nn.Module):
    """Construct the overlapping patch embeddings."""

    def __init__(self, patch_size, stride, num_channels, hidden_size):
        super().__init__()
        self.proj = nn.Conv2d(
            num_channels,
            hidden_size,
            kernel_size=patch_size,
            stride=stride,
            padding=patch_size // 2,
        )

        self.layer_norm = nn.LayerNorm(hidden_size)

    def forward(self, pixel_values):
        embeddings = self.proj(pixel_values)
        _, _, height, width = embeddings.shape
        # (batch_size, num_channels, height, width) -> (batch_size, num_channels, height*width) -> (batch_size, height*width, num_channels)
        # this can be fed to a Transformer layer
        embeddings = embeddings.flatten(2).transpose(1, 2)
        embeddings = self.layer_norm(embeddings)
        return embeddings, height, width
2,947
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
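As a shape check for the overlapping patch embedding with GLPN's first-stage defaults (patch_size=7, stride=4, so padding=3), a 480x640 RGB input becomes a 4x-downsampled 32-channel map and then a 19200-token sequence:

import torch
import torch.nn as nn

proj = nn.Conv2d(3, 32, kernel_size=7, stride=4, padding=7 // 2)
x = torch.rand(1, 3, 480, 640)
y = proj(x)
print(y.shape)  # torch.Size([1, 32, 120, 160])
# flattened for the Transformer: (batch, height*width, channels)
print(y.flatten(2).transpose(1, 2).shape)  # torch.Size([1, 19200, 32])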
class GLPNEfficientSelfAttention(nn.Module):
    """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the
    [PvT paper](https://arxiv.org/abs/2102.12122)."""

    def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention "
                f"heads ({self.num_attention_heads})"
            )

        self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
2,948
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        self.query = nn.Linear(self.hidden_size, self.all_head_size)
        self.key = nn.Linear(self.hidden_size, self.all_head_size)
        self.value = nn.Linear(self.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

        self.sr_ratio = sequence_reduction_ratio
        if sequence_reduction_ratio > 1:
            self.sr = nn.Conv2d(
                hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio
            )
            self.layer_norm = nn.LayerNorm(hidden_size)

    def transpose_for_scores(self, hidden_states):
        new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        hidden_states = hidden_states.view(new_shape)
        return hidden_states.permute(0, 2, 1, 3)
2,948
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
    def forward(
        self,
        hidden_states,
        height,
        width,
        output_attentions=False,
    ):
        query_layer = self.transpose_for_scores(self.query(hidden_states))

        if self.sr_ratio > 1:
            batch_size, seq_len, num_channels = hidden_states.shape
            # Reshape to (batch_size, num_channels, height, width)
            hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
            # Apply sequence reduction
            hidden_states = self.sr(hidden_states)
            # Reshape back to (batch_size, seq_len, num_channels)
            hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
            hidden_states = self.layer_norm(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
2,948
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
2,948
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
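With the first stage's sr_ratio of 8, the key/value sequence shrinks by 8x8 = 64 before attention, which is the point of the sequence reduction. A standalone shape walkthrough with stage-1 numbers (120x160 feature map, hidden size 32):

import torch
import torch.nn as nn

hidden_size, sr_ratio, h, w = 32, 8, 120, 160
sr = nn.Conv2d(hidden_size, hidden_size, kernel_size=sr_ratio, stride=sr_ratio)

x = torch.rand(1, h * w, hidden_size)                    # (batch, seq_len, channels)
x_2d = x.permute(0, 2, 1).reshape(1, hidden_size, h, w)  # back to a feature map
reduced = sr(x_2d).reshape(1, hidden_size, -1).permute(0, 2, 1)
print(x.shape, "->", reduced.shape)  # 19200 tokens -> 300 tokens for keys/values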
class GLPNSelfOutput(nn.Module):
    def __init__(self, config, hidden_size):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
2,949
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
class GLPNAttention(nn.Module):
    def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
        super().__init__()
        self.self = GLPNEfficientSelfAttention(
            config=config,
            hidden_size=hidden_size,
            num_attention_heads=num_attention_heads,
            sequence_reduction_ratio=sequence_reduction_ratio,
        )
        self.output = GLPNSelfOutput(config, hidden_size=hidden_size)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
2,950
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states, height, width, output_attentions=False):
        self_outputs = self.self(hidden_states, height, width, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
2,950
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
class GLPNDWConv(nn.Module):
    def __init__(self, dim=768):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, hidden_states, height, width):
        batch_size, seq_len, num_channels = hidden_states.shape
        hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
        hidden_states = self.dwconv(hidden_states)
        hidden_states = hidden_states.flatten(2).transpose(1, 2)
        return hidden_states
2,951
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
class GLPNMixFFN(nn.Module):
    def __init__(self, config, in_features, hidden_features=None, out_features=None):
        super().__init__()
        out_features = out_features or in_features
        self.dense1 = nn.Linear(in_features, hidden_features)
        self.dwconv = GLPNDWConv(hidden_features)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.dense2 = nn.Linear(hidden_features, out_features)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
2,952
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
    def forward(self, hidden_states, height, width):
        hidden_states = self.dense1(hidden_states)
        hidden_states = self.dwconv(hidden_states, height, width)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense2(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
2,952
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
class GLPNLayer(nn.Module):
    """This corresponds to the Block class in the original implementation."""

    def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
        super().__init__()
        self.layer_norm_1 = nn.LayerNorm(hidden_size)
        self.attention = GLPNAttention(
            config,
            hidden_size=hidden_size,
            num_attention_heads=num_attention_heads,
            sequence_reduction_ratio=sequence_reduction_ratio,
        )
        self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.layer_norm_2 = nn.LayerNorm(hidden_size)
        mlp_hidden_size = int(hidden_size * mlp_ratio)
        self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
2,953
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
    def forward(self, hidden_states, height, width, output_attentions=False):
        self_attention_outputs = self.attention(
            self.layer_norm_1(hidden_states),  # in GLPN, layernorm is applied before self-attention
            height,
            width,
            output_attentions=output_attentions,
        )

        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection (with stochastic depth)
        attention_output = self.drop_path(attention_output)
        hidden_states = attention_output + hidden_states

        mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)

        # second residual connection (with stochastic depth)
        mlp_output = self.drop_path(mlp_output)
        layer_output = mlp_output + hidden_states

        outputs = (layer_output,) + outputs

        return outputs
2,953
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
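`GLPNDropPath` is defined elsewhere in this file; for reference, here is a sketch of the standard stochastic-depth formulation it is assumed to follow: during training the whole residual branch is zeroed for a random subset of samples and the survivors are rescaled, so the skip connection always carries the signal through. The function name and shapes below are illustrative, not the library's API:

```python
import torch

def drop_path(x: torch.Tensor, drop_prob: float = 0.1, training: bool = True) -> torch.Tensor:
    # Standard stochastic depth: zero the branch per sample, rescale survivors.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    mask = x.new_empty((x.shape[0],) + (1,) * (x.ndim - 1)).bernoulli_(keep_prob)
    return x * mask / keep_prob

hidden_states = torch.randn(4, 16, 32)
branch_output = torch.randn(4, 16, 32)  # stand-in for the attention or MLP output
hidden_states = drop_path(branch_output) + hidden_states  # residual survives dropped branches
```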
class GLPNEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                GLPNOverlapPatchEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)
2,954
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    GLPNLayer(
                        config,
                        hidden_size=config.hidden_sizes[i],
                        num_attention_heads=config.num_attention_heads[i],
                        drop_path=dpr[cur + j],
                        sequence_reduction_ratio=config.sr_ratios[i],
                        mlp_ratio=config.mlp_ratios[i],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

        # Layer norms
        self.layer_norm = nn.ModuleList(
            [nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)]
        )
2,954
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
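The "stochastic depth decay rule" above assigns each layer its own drop probability, increasing linearly from 0 at the input to `drop_path_rate` at the deepest layer. A quick sketch with illustrative values (the depths and rate below are assumptions, not the config defaults):

```python
import torch

depths = [2, 2, 2, 2]  # illustrative per-stage layer counts
drop_path_rate = 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
print(dpr)  # linearly increasing drop probabilities, one per layer across all stages
```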
    def forward(
        self,
        pixel_values,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        batch_size = pixel_values.shape[0]
2,954
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        hidden_states = pixel_values
        for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
            embedding_layer, block_layer, norm_layer = x
            # first, obtain patch embeddings
            hidden_states, height, width = embedding_layer(hidden_states)
            # second, send embeddings through blocks
            for i, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states, height, width, output_attentions)
                hidden_states = layer_outputs[0]
                if output_attentions:
                    all_self_attentions = all_self_attentions + (layer_outputs[1],)
            # third, apply layer norm
            hidden_states = norm_layer(hidden_states)
            # fourth, optionally reshape back to (batch_size, num_channels, height, width)
            hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
            if output_hidden_states:
2,954
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
                all_hidden_states = all_hidden_states + (hidden_states,)
2,954
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
2,954
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
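To see the hierarchical behaviour end to end, here is a hedged sketch that runs a randomly initialized backbone and prints the per-stage feature maps. The exact channel counts and strides come from the default `GLPNConfig` and should be treated as assumptions:

```python
import torch
from transformers import GLPNConfig, GLPNModel

config = GLPNConfig()  # default hierarchy; stage sizes are whatever the defaults yield
model = GLPNModel(config).eval()

pixel_values = torch.randn(1, 3, 64, 64)
with torch.no_grad():
    outputs = model(pixel_values, output_hidden_states=True)

for stage_idx, feature_map in enumerate(outputs.hidden_states):
    print(stage_idx, tuple(feature_map.shape))  # (1, C_i, H_i, W_i), spatial size shrinking per stage
```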
class GLPNPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GLPNConfig
    base_model_prefix = "glpn"
    main_input_name = "pixel_values"
    _no_split_modules = []
2,955
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
    # Copied from transformers.models.segformer.modeling_segformer.SegformerPreTrainedModel._init_weights
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
2,955
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
class GLPNModel(GLPNPreTrainedModel):
    # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.__init__ with Segformer->GLPN
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        # hierarchical Transformer encoder
        self.encoder = GLPNEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
2,956
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
    @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.forward
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
2,956
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
2,956
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        encoder_outputs = self.encoder(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
2,956
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
class GLPNSelectiveFeatureFusion(nn.Module):
    """
    Selective Feature Fusion module, as explained in the [paper](https://arxiv.org/abs/2201.07436) (section 3.4). This
    module adaptively selects and integrates local and global features by computing an attention map for each feature.
    """

    def __init__(self, in_channel=64):
        super().__init__()

        self.convolutional_layer1 = nn.Sequential(
            nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(in_channel),
            nn.ReLU(),
        )

        self.convolutional_layer2 = nn.Sequential(
            nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(int(in_channel / 2)),
            nn.ReLU(),
        )

        self.convolutional_layer3 = nn.Conv2d(
            in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1
        )
2,957
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        self.sigmoid = nn.Sigmoid()

    def forward(self, local_features, global_features):
        # concatenate features along the channel dimension
        features = torch.cat((local_features, global_features), dim=1)
        # pass through convolutional layers
        features = self.convolutional_layer1(features)
        features = self.convolutional_layer2(features)
        features = self.convolutional_layer3(features)
        # apply sigmoid to get a two-channel attention map
        attn = self.sigmoid(features)
        # construct hybrid features by weighting each input with its attention channel and summing
        hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[
            :, 1, :, :
        ].unsqueeze(1)

        return hybrid_features
2,957
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
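A small usage sketch; the import path points at the private modeling module and is an assumption, as are the batch size and resolution. Note the two sigmoid channels are independent, so the per-pixel weights need not sum to one:

```python
import torch
from transformers.models.glpn.modeling_glpn import GLPNSelectiveFeatureFusion  # assumed import path

fusion = GLPNSelectiveFeatureFusion(in_channel=64)
local_features = torch.randn(2, 64, 16, 16)
global_features = torch.randn(2, 64, 16, 16)

hybrid = fusion(local_features, global_features)
assert hybrid.shape == local_features.shape  # fusion preserves the feature-map shape
```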
class GLPNDecoderStage(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        should_skip = in_channels == out_channels
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity()
        self.fusion = GLPNSelectiveFeatureFusion(out_channels)
        self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)

    def forward(self, hidden_state, residual=None):
        hidden_state = self.convolution(hidden_state)
        if residual is not None:
            hidden_state = self.fusion(hidden_state, residual)
        hidden_state = self.upsample(hidden_state)

        return hidden_state
2,958
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
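Shape-wise, each stage projects the encoder feature to the decoder width, optionally fuses it with the coarser stage's output, and doubles the resolution. A hedged sketch (the import path and all sizes are assumptions):

```python
import torch
from transformers.models.glpn.modeling_glpn import GLPNDecoderStage  # assumed import path

stage = GLPNDecoderStage(in_channels=160, out_channels=64)
encoder_feature = torch.randn(1, 160, 8, 8)
previous_stage = torch.randn(1, 64, 8, 8)  # coarser stage's output, already upsampled to 8x8

out = stage(encoder_feature, previous_stage)
assert out.shape == (1, 64, 16, 16)  # 1x1 projection, fusion, then 2x bilinear upsampling
```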
class GLPNDecoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        # we use features from end -> start
        reversed_hidden_sizes = config.hidden_sizes[::-1]
        out_channels = config.decoder_hidden_size

        self.stages = nn.ModuleList(
            [GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reversed_hidden_sizes]
        )
        # don't fuse in first stage
        self.stages[0].fusion = None

        self.final_upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)

    def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]:
        stage_hidden_states = []
        stage_hidden_state = None
        for hidden_state, stage in zip(hidden_states[::-1], self.stages):
            stage_hidden_state = stage(hidden_state, stage_hidden_state)
            stage_hidden_states.append(stage_hidden_state)

        stage_hidden_states[-1] = self.final_upsample(stage_hidden_state)

        return stage_hidden_states
2,959
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
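The decoder consumes the encoder features coarsest-first (hence the `[::-1]`) and threads each stage's output into the next one as the global feature. A hedged sketch; the per-stage shapes below are made up to match the stated `hidden_sizes`, and the import path is an assumption:

```python
import torch
from transformers import GLPNConfig
from transformers.models.glpn.modeling_glpn import GLPNDecoder  # assumed import path

config = GLPNConfig(hidden_sizes=[32, 64, 160, 256], decoder_hidden_size=64)
decoder = GLPNDecoder(config)

# encoder outputs, finest to coarsest, mirroring GLPNEncoder's output order
encoder_features = [
    torch.randn(1, 32, 16, 16),
    torch.randn(1, 64, 8, 8),
    torch.randn(1, 160, 4, 4),
    torch.randn(1, 256, 2, 2),
]
stage_outputs = decoder(encoder_features)
print([tuple(t.shape) for t in stage_outputs])  # resolution grows stage by stage
```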
class SiLogLoss(nn.Module):
    r"""
    Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://arxiv.org/abs/1406.2283).

    $$L=\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{1}{2 n^{2}}\left(\sum_{i} d_{i}\right)^{2}$$

    where $d_{i}=\log y_{i}-\log y_{i}^{*}$.

    """

    def __init__(self, lambd=0.5):
        super().__init__()
        self.lambd = lambd

    def forward(self, pred, target):
        valid_mask = (target > 0).detach()
        diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
        loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2))

        return loss
2,960
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
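A tiny numeric sketch of the same computation on hand-made tensors, to make the validity masking and the scale-invariant term concrete (values are arbitrary):

```python
import torch

pred = torch.tensor([1.0, 2.0, 4.0, 0.5])
target = torch.tensor([1.0, 2.5, 4.0, 0.0])  # the 0.0 entry is excluded by the validity mask

valid_mask = target > 0
diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])

lambd = 0.5
loss = torch.sqrt((diff_log**2).mean() - lambd * diff_log.mean() ** 2)
print(loss)  # 0 exactly when pred == target on all valid entries
```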
class GLPNDepthEstimationHead(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config

        channels = config.decoder_hidden_size
        self.head = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=False),
            nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1),
        )

    def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor:
        # use last features of the decoder
        hidden_states = hidden_states[self.config.head_in_index]

        hidden_states = self.head(hidden_states)

        predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth
        predicted_depth = predicted_depth.squeeze(dim=1)

        return predicted_depth
2,961
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
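The sigmoid at the end bounds predictions to the open interval (0, `max_depth`), which is why the config carries a maximum depth. A quick sketch; `max_depth = 10.0` is purely illustrative, the real value comes from `config.max_depth`:

```python
import torch

max_depth = 10.0  # illustrative; the real value comes from config.max_depth
logits = torch.randn(1, 1, 4, 4)

predicted_depth = torch.sigmoid(logits) * max_depth
predicted_depth = predicted_depth.squeeze(dim=1)  # drop the singleton channel -> (1, 4, 4)
assert predicted_depth.min() > 0 and predicted_depth.max() < max_depth
```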
class GLPNForDepthEstimation(GLPNPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.glpn = GLPNModel(config)
        self.decoder = GLPNDecoder(config)
        self.head = GLPNDepthEstimationHead(config)

        # Initialize weights and apply final processing
        self.post_init()
2,962
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
    @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        labels: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:
        r"""
        labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth depth estimation maps for computing the loss.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, GLPNForDepthEstimation
        >>> import torch
        >>> import numpy as np
        >>> from PIL import Image
        >>> import requests
2,962
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti")
        >>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")

        >>> # prepare image for the model
        >>> inputs = image_processor(images=image, return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> # interpolate to original size
        >>> post_processed_output = image_processor.post_process_depth_estimation(
        ...     outputs,
        ...     target_sizes=[(image.height, image.width)],
        ... )
2,962
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/glpn/modeling_glpn.py