# From src/transformers/models/convnextv2/modeling_convnextv2.py
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embeddings(pixel_values)
outputs = self.encoder(
embedding_output,
output_hidden_states=True,
return_dict=return_dict,
)
hidden_states = outputs.hidden_states if return_dict else outputs[1]
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
hidden_state = self.hidden_states_norms[stage](hidden_state)
                feature_maps += (hidden_state,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=hidden_states if output_hidden_states else None,
attentions=None,
        )
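# Illustrative usage sketch (not part of the original file), assuming the
# `facebook/convnextv2-tiny-1k-224` checkpoint: the backbone forward above returns
# one feature map per stage requested in `out_features`.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2Backbone

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2Backbone.from_pretrained(
    "facebook/convnextv2-tiny-1k-224", out_features=["stage2", "stage4"]
)
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# one (batch, channels, height, width) tensor per requested stage
print([fm.shape for fm in outputs.feature_maps])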
# From src/transformers/models/convnextv2/configuration_convnextv2.py
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate an
ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
[facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
patch_size (`int`, *optional*, defaults to 4):
Patch size to use in the patch embedding layer.
num_stages (`int`, *optional*, defaults to 4):
The number of stages in the model.
hidden_sizes (`List[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
Dimensionality (hidden size) at each stage.
depths (`List[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
Depth (number of blocks) for each stage.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The drop rate for stochastic depth.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
out_features (`List[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
Example:
```python
    >>> from transformers import ConvNextV2Config, ConvNextV2Model
    >>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
    >>> configuration = ConvNextV2Config()
>>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
>>> model = ConvNextV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "convnextv2"
def __init__(
self,
num_channels=3,
patch_size=4,
num_stages=4,
hidden_sizes=None,
depths=None,
hidden_act="gelu",
initializer_range=0.02,
layer_norm_eps=1e-12,
drop_path_rate=0.0,
image_size=224,
out_features=None,
out_indices=None,
**kwargs,
):
        super().__init__(**kwargs)
self.num_channels = num_channels
self.patch_size = patch_size
self.num_stages = num_stages
self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
self.depths = [3, 3, 9, 3] if depths is None else depths
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.drop_path_rate = drop_path_rate
self.image_size = image_size
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
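# Minimal sketch (not from the original file) of how `out_features` and
# `out_indices` are kept aligned with `stage_names` by
# `get_aligned_output_features_output_indices`, assuming the default 4-stage layout.
from transformers import ConvNextV2Config

config = ConvNextV2Config(out_indices=[1, 3])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage1', 'stage3'], derived from out_indices
print(config.out_indices)   # [1, 3]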
# From src/transformers/models/vitpose/configuration_vitpose.py
class VitPoseConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VitPoseForPoseEstimation`]. It is used to instantiate a
VitPose model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VitPose
[usyd-community/vitpose-base-simple](https://huggingface.co/usyd-community/vitpose-base-simple) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
Args:
backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `VitPoseBackboneConfig()`):
The configuration of the backbone model. Currently, only `backbone_config` with `vitpose_backbone` as `model_type` is supported.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
            library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_factor (`int`, *optional*, defaults to 4):
Factor to upscale the feature maps coming from the ViT backbone.
use_simple_decoder (`bool`, *optional*, defaults to `True`):
            Whether to use a `VitPoseSimpleDecoder` to decode the feature maps from the backbone into heatmaps. Otherwise it uses `VitPoseClassicDecoder`.
Example:
```python
>>> from transformers import VitPoseConfig, VitPoseForPoseEstimation
>>> # Initializing a VitPose configuration
>>> configuration = VitPoseConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = VitPoseForPoseEstimation(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vitpose"
def __init__(
self,
backbone_config: PretrainedConfig = None,
backbone: str = None,
use_pretrained_backbone: bool = False,
use_timm_backbone: bool = False,
backbone_kwargs: dict = None,
initializer_range: float = 0.02,
scale_factor: int = 4,
use_simple_decoder: bool = True,
**kwargs,
):
        super().__init__(**kwargs)
        if use_pretrained_backbone:
            logger.info(
                "`use_pretrained_backbone` is `True`. If you only want to run inference with VitPose weights, do not set this value."
            )
        if use_timm_backbone:
            raise ValueError("Setting `use_timm_backbone` to `True` is not supported at the moment.")
if backbone_config is None and backbone is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `VitPose` backbone.")
backbone_config = CONFIG_MAPPING["vitpose_backbone"](out_indices=[4])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.initializer_range = initializer_range
self.scale_factor = scale_factor
        self.use_simple_decoder = use_simple_decoder
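# Hedged sketch (not from the original file): passing an explicit backbone config,
# here with the same `out_indices=[4]` default used above, and opting into the
# classic deconvolutional decoder.
from transformers import VitPoseBackboneConfig, VitPoseConfig

backbone_config = VitPoseBackboneConfig(out_indices=[4])
config = VitPoseConfig(backbone_config=backbone_config, use_simple_decoder=False)
print(config.backbone_config.model_type)  # vitpose_backbone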
# From src/transformers/models/vitpose/modeling_vitpose.py
class VitPoseEstimatorOutput(ModelOutput):
"""
    Class for outputs of pose estimation models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Loss is not supported at the moment. See https://github.com/ViTAE-Transformer/ViTPose/tree/main/mmpose/models/losses for further details.
heatmaps (`torch.FloatTensor` of shape `(batch_size, num_keypoints, height, width)`):
Heatmaps as predicted by the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
            (also called feature maps) of the model at the output of each stage.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
heatmaps: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
class VitPosePreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = VitPoseConfig
base_model_prefix = "vit"
main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
# `trunc_normal_cpu` not implemented in `half` issues
module.weight.data = nn.init.trunc_normal_(
module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
            module.weight.data.fill_(1.0)
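# Standalone sketch (not from the original file) of the fp32 upcast trick above:
# `trunc_normal_` has no half-precision CPU kernel, so the weight is initialized
# in float32 and cast back to the original dtype.
import torch
from torch import nn

weight = torch.empty(4, 4, dtype=torch.float16)
weight = nn.init.trunc_normal_(weight.to(torch.float32), mean=0.0, std=0.02).to(torch.float16)
print(weight.dtype)  # torch.float16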
class VitPoseSimpleDecoder(nn.Module):
"""
Simple decoding head consisting of a ReLU activation, 4x upsampling and a 3x3 convolution, turning the
feature maps into heatmaps.
"""
def __init__(self, config) -> None:
super().__init__()
self.activation = nn.ReLU()
self.upsampling = nn.Upsample(scale_factor=config.scale_factor, mode="bilinear", align_corners=False)
self.conv = nn.Conv2d(
config.backbone_config.hidden_size, config.num_labels, kernel_size=3, stride=1, padding=1
)
def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None) -> torch.Tensor:
# Transform input: ReLU + upsample
hidden_state = self.activation(hidden_state)
hidden_state = self.upsampling(hidden_state)
heatmaps = self.conv(hidden_state)
if flip_pairs is not None:
heatmaps = flip_back(heatmaps, flip_pairs)
        return heatmaps
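# Shape walk-through (not from the original file) re-creating the simple decoder,
# assuming hidden_size=768, scale_factor=4 and 17 COCO keypoints, with the 16x12
# patch grid produced by a 256x192 input and 16x16 patches.
import torch
from torch import nn

feature_map = torch.randn(1, 768, 16, 12)  # (batch, hidden_size, patch_h, patch_w)
upsampled = nn.functional.interpolate(
    nn.functional.relu(feature_map), scale_factor=4, mode="bilinear", align_corners=False
)                                           # (1, 768, 64, 48)
heatmaps = nn.Conv2d(768, 17, kernel_size=3, padding=1)(upsampled)
print(heatmaps.shape)                       # torch.Size([1, 17, 64, 48])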
class VitPoseClassicDecoder(nn.Module):
"""
Classic decoding head consisting of a 2 deconvolutional blocks, followed by a 1x1 convolution layer,
turning the feature maps into heatmaps.
"""
def __init__(self, config: VitPoseConfig):
super().__init__()
self.deconv1 = nn.ConvTranspose2d(
config.backbone_config.hidden_size, 256, kernel_size=4, stride=2, padding=1, bias=False
)
self.batchnorm1 = nn.BatchNorm2d(256)
self.relu1 = nn.ReLU()
self.deconv2 = nn.ConvTranspose2d(256, 256, kernel_size=4, stride=2, padding=1, bias=False)
self.batchnorm2 = nn.BatchNorm2d(256)
self.relu2 = nn.ReLU()
        self.conv = nn.Conv2d(256, config.num_labels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None):
hidden_state = self.deconv1(hidden_state)
hidden_state = self.batchnorm1(hidden_state)
hidden_state = self.relu1(hidden_state)
hidden_state = self.deconv2(hidden_state)
hidden_state = self.batchnorm2(hidden_state)
hidden_state = self.relu2(hidden_state)
heatmaps = self.conv(hidden_state)
if flip_pairs is not None:
heatmaps = flip_back(heatmaps, flip_pairs)
        return heatmaps
class VitPoseForPoseEstimation(VitPosePreTrainedModel):
def __init__(self, config: VitPoseConfig) -> None:
super().__init__(config)
self.backbone = load_backbone(config)
# add backbone attributes
if not hasattr(self.backbone.config, "hidden_size"):
raise ValueError("The backbone should have a hidden_size attribute")
if not hasattr(self.backbone.config, "image_size"):
raise ValueError("The backbone should have an image_size attribute")
if not hasattr(self.backbone.config, "patch_size"):
raise ValueError("The backbone should have a patch_size attribute")
self.head = VitPoseSimpleDecoder(config) if config.use_simple_decoder else VitPoseClassicDecoder(config)
# Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(VITPOSE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=VitPoseEstimatorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values: torch.Tensor,
dataset_index: Optional[torch.Tensor] = None,
flip_pairs: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, VitPoseEstimatorOutput]:
"""
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, VitPoseForPoseEstimation
>>> import torch
>>> from PIL import Image
>>> import requests
>>> processor = AutoImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
        >>> model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]]
>>> inputs = processor(image, boxes=boxes, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> heatmaps = outputs.heatmaps
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
loss = None
if labels is not None:
            raise NotImplementedError("Training is not yet supported")
outputs = self.backbone.forward_with_filtered_kwargs(
pixel_values,
dataset_index=dataset_index,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
        # Turn output hidden states into a tensor of shape (batch_size, num_channels, height, width)
sequence_output = outputs.feature_maps[-1] if return_dict else outputs[0][-1]
batch_size = sequence_output.shape[0]
patch_height = self.config.backbone_config.image_size[0] // self.config.backbone_config.patch_size[0]
patch_width = self.config.backbone_config.image_size[1] // self.config.backbone_config.patch_size[1]
sequence_output = (
sequence_output.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width).contiguous()
)
        heatmaps = self.head(sequence_output, flip_pairs=flip_pairs)
if not return_dict:
if output_hidden_states:
output = (heatmaps,) + outputs[1:]
else:
output = (heatmaps,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return VitPoseEstimatorOutput(
loss=loss,
heatmaps=heatmaps,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
        )
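# Standalone sketch (not from the original file) of the sequence-to-feature-map
# reshape in the forward pass above, assuming a 256x192 input, 16x16 patches and
# hidden_size=768.
import torch

batch_size, hidden_size = 2, 768
patch_height, patch_width = 256 // 16, 192 // 16  # 16, 12
sequence_output = torch.randn(batch_size, patch_height * patch_width, hidden_size)
feature_map = (
    sequence_output.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width).contiguous()
)
print(feature_map.shape)  # torch.Size([2, 768, 16, 12])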
# From src/transformers/models/vitpose/image_processing_vitpose.py
class VitPoseImageProcessor(BaseImageProcessor):
r"""
    Constructs a VitPose image processor.
Args:
do_affine_transform (`bool`, *optional*, defaults to `True`):
Whether to apply an affine transformation to the input images.
        size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 192}`):
            Resolution of the image after `affine_transform` is applied. Only has an effect if `do_affine_transform` is set to `True`. Can
            be overridden by `size` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation. | 2,927 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vitpose/image_processing_vitpose.py |
        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
            The sequence of means for each channel, to be used when normalizing images.
        image_std (`float` or `List[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
            The sequence of standard deviations for each channel, to be used when normalizing images.
    """
model_input_names = ["pixel_values"]
def __init__(
self,
do_affine_transform: bool = True,
size: Dict[str, int] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.do_affine_transform = do_affine_transform
self.size = size if size is not None else {"height": 256, "width": 192}
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self.normalize_factor = 200.0
def affine_transform(
self,
image: np.array,
center: Tuple[float],
scale: Tuple[float],
rotation: float,
size: Dict[str, int],
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.array:
"""
        Apply an affine transformation to an image.
Args:
image (`np.array`):
Image to transform.
center (`Tuple[float]`):
Center of the bounding box (x, y).
scale (`Tuple[float]`):
Scale of the bounding box with respect to height/width.
rotation (`float`):
Rotation angle in degrees.
size (`Dict[str, int]`):
Size of the destination image.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format of the output image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image.
"""
data_format = input_data_format if data_format is None else data_format
        size = (size["width"], size["height"])
        # the scale is normalized using a pixel standard deviation of 200 pixels
transformation = get_warp_matrix(rotation, center * 2.0, np.array(size) - 1.0, scale * 200.0)
# input image requires channels last format
image = (
image
if input_data_format == ChannelDimension.LAST
else to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)
)
image = scipy_warp_affine(src=image, M=transformation, size=(size[1], size[0]))
image = to_channel_dimension_format(image, data_format, ChannelDimension.LAST)
        return image
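# Numeric sketch (not from the original file) of the 200-pixel convention above,
# ignoring the aspect-ratio and padding adjustments that `box_to_center_and_scale`
# applies: a COCO box yields a center and a scale in units of 200 px, and
# `scale * 200.0` recovers the pixel extent handed to the warp matrix.
import numpy as np

box = np.array([412.8, 157.61, 53.05, 138.01])  # COCO (x, y, w, h)
center = box[:2] + 0.5 * box[2:]                # box center in pixels
scale = box[2:] / 200.0                         # extent in 200-px units
print(center, scale * 200.0)                    # scale * 200 == (w, h)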
def preprocess(
self,
images: ImageInput,
boxes: Union[List[List[float]], np.ndarray],
do_affine_transform: bool = None,
size: Dict[str, int] = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_normalize: bool = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
boxes (`List[List[List[float]]]` or `np.ndarray`):
List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding
                box coordinates in COCO format (top_left_x, top_left_y, width, height).
do_affine_transform (`bool`, *optional*, defaults to `self.do_affine_transform`):
Whether to apply an affine transformation to the input images.
size (`Dict[str, int]` *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
width).
"""
do_affine_transform = do_affine_transform if do_affine_transform is not None else self.do_affine_transform
size = size if size is not None else self.size
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if isinstance(boxes, list) and len(images) != len(boxes):
            raise ValueError(f"Batch of images and boxes mismatch: {len(images)} != {len(boxes)}")
        elif isinstance(boxes, np.ndarray) and len(images) != boxes.shape[0]:
            raise ValueError(f"Batch of images and boxes mismatch: {len(images)} != {boxes.shape[0]}")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])
# transformations (affine transformation + rescaling + normalization)
        if do_affine_transform:
new_images = []
for image, image_boxes in zip(images, boxes):
for box in image_boxes:
center, scale = box_to_center_and_scale(
box,
image_width=size["width"],
image_height=size["height"],
normalize_factor=self.normalize_factor,
)
transformed_image = self.affine_transform(
image, center, scale, rotation=0, size=size, input_data_format=input_data_format
)
new_images.append(transformed_image)
            images = new_images
# For batch processing, the number of boxes must be consistent across all images in the batch.
# When using a list input, the number of boxes can vary dynamically per image.
# The image processor creates pixel_values of shape (batch_size*num_persons, num_channels, height, width)
all_images = []
for image in images:
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
data = {"pixel_values": images}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
return encoded_inputs
def keypoints_from_heatmaps(
self,
heatmaps: np.ndarray,
center: np.ndarray,
scale: np.ndarray,
kernel: int = 11,
):
"""
Get final keypoint predictions from heatmaps and transform them back to
the image.
Args:
heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width])`):
Model predicted heatmaps.
center (`np.ndarray` of shape `(batch_size, 2)`):
Center of the bounding box (x, y).
scale (`np.ndarray` of shape `(batch_size, 2)`):
Scale of the bounding box wrt original images of width and height.
kernel (int, *optional*, defaults to 11):
Gaussian kernel size (K) for modulation, which should match the heatmap gaussian sigma when training.
                K=17 for sigma=3 and K=11 for sigma=2.
Returns:
tuple: A tuple containing keypoint predictions and scores.
- preds (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`):
Predicted keypoint location in images.
- scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`):
Scores (confidence) of the keypoints.
"""
batch_size, _, height, width = heatmaps.shape
coords, scores = get_keypoint_predictions(heatmaps)
preds = post_dark_unbiased_data_processing(coords, heatmaps, kernel=kernel)
# Transform back to the image
for i in range(batch_size):
preds[i] = transform_preds(preds[i], center=center[i], scale=scale[i], output_size=[height, width])
        return preds, scores
def post_process_pose_estimation(
self,
outputs: "VitPoseEstimatorOutput",
boxes: Union[List[List[List[float]]], np.ndarray],
kernel_size: int = 11,
threshold: float = None,
target_sizes: Union[TensorType, List[Tuple]] = None,
):
"""
        Transform the heatmaps into keypoint predictions and transform them back to the image.
Args:
outputs (`VitPoseEstimatorOutput`):
VitPoseForPoseEstimation model outputs.
boxes (`List[List[List[float]]]` or `np.ndarray`):
List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding
box coordinates in COCO format (top_left_x, top_left_y, width, height).
kernel_size (`int`, *optional*, defaults to 11):
Gaussian kernel size (K) for modulation.
threshold (`float`, *optional*, defaults to None):
Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                `(height, width)` of each image in the batch. If unset, predictions will not be resized.
        Returns:
`List[List[Dict]]`: A list of dictionaries, each dictionary containing the keypoints and boxes for an image
in the batch as predicted by the model.
""" | 2,927 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/vitpose/image_processing_vitpose.py |
# First compute centers and scales for each bounding box
batch_size, num_keypoints, _, _ = outputs.heatmaps.shape
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
centers = np.zeros((batch_size, 2), dtype=np.float32)
scales = np.zeros((batch_size, 2), dtype=np.float32)
flattened_boxes = list(itertools.chain(*boxes))
for i in range(batch_size):
if target_sizes is not None:
image_width, image_height = target_sizes[i][0], target_sizes[i][1]
scale_factor = np.array([image_width, image_height, image_width, image_height])
flattened_boxes[i] = flattened_boxes[i] * scale_factor
width, height = self.size["width"], self.size["height"]
center, scale = box_to_center_and_scale(flattened_boxes[i], image_width=width, image_height=height)
centers[i, :] = center
scales[i, :] = scale
preds, scores = self.keypoints_from_heatmaps(
outputs.heatmaps.cpu().numpy(), centers, scales, kernel=kernel_size
        )
all_boxes = np.zeros((batch_size, 4), dtype=np.float32)
all_boxes[:, 0:2] = centers[:, 0:2]
all_boxes[:, 2:4] = scales[:, 0:2]
poses = torch.tensor(preds)
scores = torch.tensor(scores)
labels = torch.arange(0, num_keypoints)
bboxes_xyxy = torch.tensor(coco_to_pascal_voc(all_boxes))
results: List[List[Dict[str, torch.Tensor]]] = []
        pose_bbox_pairs = zip(poses, scores, bboxes_xyxy)
for image_bboxes in boxes:
image_results: List[Dict[str, torch.Tensor]] = []
for _ in image_bboxes:
# Unpack the next pose and bbox_xyxy from the iterator
pose, score, bbox_xyxy = next(pose_bbox_pairs)
score = score.squeeze()
keypoints_labels = labels
if threshold is not None:
keep = score > threshold
pose = pose[keep]
score = score[keep]
keypoints_labels = keypoints_labels[keep]
pose_result = {"keypoints": pose, "scores": score, "labels": keypoints_labels, "bbox": bbox_xyxy}
image_results.append(pose_result)
results.append(image_results)
        return results
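# End-to-end sketch (not from the original file) tying the preprocessing, model
# and post-processing above together, using the same checkpoint and box as the
# forward() docstring example.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, VitPoseForPoseEstimation

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
boxes = [[[412.8, 157.61, 53.05, 138.01]]]  # one image, one person box
processor = AutoImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple")
inputs = processor(image, boxes=boxes, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
results = processor.post_process_pose_estimation(outputs, boxes=boxes)
print(results[0][0]["keypoints"].shape)     # (num_keypoints, 2)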
# From src/transformers/models/pixtral/convert_pixtral_weights_to_hf.py
class MistralConverter:
"""
A general tiktoken converter.
"""
def __init__(
self,
vocab=None,
pattern=r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""",
add_prefix_space=False,
additional_special_tokens=None,
*args,
**kwargs,
):
super().__init__(*args)
self.vocab = vocab
self.pattern = pattern
self.add_prefix_space = add_prefix_space
self.additional_special_tokens = additional_special_tokens
def extract_vocab_merges_from_model(self, vocab: str):
bpe_ranks = vocab
byte_encoder = bytes_to_unicode()
def token_bytes_to_string(b):
return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")]) | 2,928 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/convert_pixtral_weights_to_hf.py |
merges = []
vocab = {}
for idx, (token, rank) in enumerate(bpe_ranks.items()):
if token not in self.additional_special_tokens:
vocab[token_bytes_to_string(token)] = idx
if len(token) == 1:
continue
local = []
for index in range(1, len(token)):
piece_l, piece_r = token[:index], token[index:]
if piece_l in bpe_ranks and piece_r in bpe_ranks and (piece_l + piece_r) in bpe_ranks:
local.append((piece_l, piece_r, rank))
local = sorted(local, key=lambda x: (bpe_ranks[x[0]], bpe_ranks[x[1]]), reverse=False)
merges.extend(local)
else:
vocab[token] = idx
merges = sorted(merges, key=lambda val: val[2], reverse=False)
merges = [(token_bytes_to_string(val[0]), token_bytes_to_string(val[1])) for val in merges]
        return vocab, merges
def tokenizer(self):
vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab)
tokenizer = Tokenizer(BPE(vocab_scores, merges, fuse_unk=False))
if hasattr(tokenizer.model, "ignore_merges"):
tokenizer.model.ignore_merges = True
return tokenizer
def converted(self) -> Tokenizer:
tokenizer = self.tokenizer()
tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.Split(Regex(self.pattern), behavior="isolated", invert=False),
pre_tokenizers.ByteLevel(add_prefix_space=self.add_prefix_space, use_regex=False),
]
)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.add_special_tokens(self.additional_special_tokens)
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
        return tokenizer
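# Small sketch (not from the original file) of `token_bytes_to_string` above,
# assuming the `bytes_to_unicode` helper from transformers.convert_slow_tokenizer:
# byte-level BPE stores tokens as raw bytes, which the GPT-2 byte encoder maps to
# printable unicode so they can live in a plain-text vocab.
from transformers.convert_slow_tokenizer import bytes_to_unicode

byte_encoder = bytes_to_unicode()
token = b"hello\n"
printable = "".join(byte_encoder[ord(char)] for char in token.decode("latin-1"))
print(printable)  # 'hello' plus the printable stand-in for '\n' ('Ċ')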
# From src/transformers/models/pixtral/configuration_pixtral.py
class PixtralVisionConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PixtralVisionModel`]. It is used to instantiate an
Pixtral vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to the vision encoder used by Pixtral-12B.
e.g. [pixtral-hf/pixtral-9b](https://huggingface.co/pixtral-hf/pixtral-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1024):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of input channels in the input images.
image_size (`int`, *optional*, defaults to 1024):
Max dimension of the input images.
patch_size (`int`, *optional*, defaults to 16):
Size of the image patches.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
Activation function used in the hidden layers.
attention_dropout (`float`, *optional*, defaults to 0.0): | 2,929 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/configuration_pixtral.py |
Dropout probability for the attention layers.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import PixtralVisionModel, PixtralVisionConfig
>>> # Initializing a Pixtral-12B style configuration
>>> config = PixtralVisionConfig()
>>> # Initializing a model (with randomly initialized weights) from the configuration
    >>> model = PixtralVisionModel(config)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "pixtral"
def __init__(
self,
hidden_size=1024,
intermediate_size=4096,
num_hidden_layers=24,
num_attention_heads=16,
num_channels=3,
image_size=1024,
patch_size=16,
hidden_act="gelu",
attention_dropout=0.0,
rope_theta=10000.0,
initializer_range=0.02,
**kwargs,
):
        super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.attention_dropout = attention_dropout
self.hidden_act = hidden_act
self.rope_theta = rope_theta
self.head_dim = hidden_size // num_attention_heads
        self.initializer_range = initializer_range
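# Quick sketch (not from the original file): the attention head dimension derived
# from the defaults above.
from transformers import PixtralVisionConfig

config = PixtralVisionConfig()
assert config.head_dim == config.hidden_size // config.num_attention_heads == 64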
# From src/transformers/models/pixtral/processing_pixtral.py
class PixtralProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": False,
},
"images_kwargs": {},
"common_kwargs": {
"return_tensors": "pt",
},
    }
class BatchMixFeature(BatchFeature):
def to(self, *args, **kwargs) -> "BatchMixFeature":
"""
Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
different `dtypes` and sending the `BatchFeature` to a different `device`.
Args:
args (`Tuple`):
Will be passed to the `to(...)` function of the tensors.
kwargs (`Dict`, *optional*):
Will be passed to the `to(...)` function of the tensors.
Returns:
[`BatchFeature`]: The same instance after modification.
""" | 2,931 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/processing_pixtral.py |
def _recursive_to(obj, device, *args, **kwargs):
# Lists can be nested, so keep digging until we hit tensors
if isinstance(obj, list):
return [_recursive_to(o, device, *args, **kwargs) for o in obj]
# We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
elif isinstance(obj, torch.Tensor) and torch.is_floating_point(obj):
# cast and send to device
return obj.to(*args, **kwargs)
elif isinstance(obj, torch.Tensor) and device is not None:
# only send to device, don't cast
return obj.to(device=device)
else:
return obj
requires_backends(self, ["torch"])
        import torch  # noqa
device = kwargs.get("device")
# Check if the args are a device or a dtype
if device is None and len(args) > 0:
# device should be always the first argument
arg = args[0]
if is_torch_dtype(arg):
# The first argument is a dtype
pass
elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
device = arg
else:
# it's something else
raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
self.data = {k: _recursive_to(v, device, *args, **kwargs) for k, v in self.data.items()}
        return self
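# Standalone sketch (not from the original file) of the selective casting above,
# using the BatchMixFeature class defined here: floating-point tensors are cast,
# integer tensors are left untouched, and nested lists are traversed recursively.
import torch

batch = BatchMixFeature(data={
    "pixel_values": [[torch.randn(3, 4, 4)]],  # nested list of float tensors
    "input_ids": torch.tensor([[1, 2, 3]]),    # LongTensor: never cast
})
batch = batch.to(torch.float16)
print(batch["pixel_values"][0][0].dtype)       # torch.float16
print(batch["input_ids"].dtype)                # torch.int64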
class PixtralProcessor(ProcessorMixin):
r"""
Constructs a Pixtral processor which wraps a Pixtral image processor and a Pixtral tokenizer into a single processor.
[`PixtralProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the
    [`~PixtralProcessor.__call__`] and [`~PixtralProcessor.decode`] for more information.
Args:
image_processor ([`PixtralImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*, defaults to 16):
Patch size from the vision tower.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"[IMG]"`):
Special token used to denote image location.
image_break_token (`str`, *optional*, defaults to `"[IMG_BREAK]"`):
Special token used to denote the end of a line of pixels in an image.
image_end_token (`str`, *optional*, defaults to `"[IMG_END]"`):
Special token used to denote the end of an image input.
""" | 2,932 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/processing_pixtral.py |
attributes = ["image_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"patch_size",
"image_token",
"image_break_token",
"image_end_token",
]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
image_processor=None,
tokenizer=None,
patch_size: int = 16,
chat_template=None,
image_token="[IMG]", # set the default and let users change if they have peculiar special tokens in rare cases
image_break_token="[IMG_BREAK]",
image_end_token="[IMG_END]",
**kwargs,
):
self.patch_size = patch_size
self.image_token = image_token
self.image_break_token = image_break_token
self.image_end_token = image_end_token
        super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
audio=None,
videos=None,
**kwargs: Unpack[PixtralProcessorKwargs],
) -> BatchMixFeature:
"""
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
# check if images and text inputs are reversed for BC
        images, text = _validate_images_text_input_order(images, text)
output_kwargs = self._merge_kwargs(
PixtralProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
        )
if images is not None:
if is_image_or_image_url(images):
                if isinstance(text, str) or (isinstance(text, list) and len(text) == 1):
# If there's a single sample, the image must belong to it
images = [[images]]
else:
raise ValueError(
"You have supplied multiple text samples, but `images` is not a nested list. When processing multiple samples, `images` should be a list of lists of images, one list per sample."
)
elif isinstance(images, list) and is_image_or_image_url(images[0]):
                if isinstance(text, str) or (isinstance(text, list) and len(text) == 1):
# If there's a single sample, all images must belong to it
images = [images]
else:
raise ValueError( | 2,932 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/processing_pixtral.py |
"You have supplied multiple text samples, but `images` is not a nested list. When processing multiple samples, `images` should be a list of lists of images, one list per sample."
)
elif isinstance(images, list) and isinstance(images[0], list) and is_image_or_image_url(images[0][0]):
pass
else:
raise ValueError(
"Invalid input images. Please provide a single image, a list of images, or a list of lists of images."
)
images = [[load_image(im) for im in sample] for sample in images]
image_inputs = self.image_processor(images, patch_size=self.patch_size, **output_kwargs["images_kwargs"])
else:
            image_inputs = {}
if isinstance(text, str):
text = [text]
        elif not isinstance(text, list) or not isinstance(text[0], str):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
# try to expand inputs in processing if we have the necessary parts
prompt_strings = text
if image_inputs.get("pixel_values") is not None:
# Replace the image token with the expanded image token sequence
images = image_inputs["pixel_values"]
image_sizes = image_inputs.pop("image_sizes")
            prompt_strings = []
for sample_images, sample_image_sizes, sample in zip(images, image_sizes, text):
replace_strings = []
# First calculate the number of tokens needed for each image and put in a placeholder
for image, image_size in zip(sample_images, sample_image_sizes):
height, width = image_size
num_height_tokens = height // self.patch_size
num_width_tokens = width // self.patch_size
replace_tokens = [
[self.image_token] * num_width_tokens + [self.image_break_token]
] * num_height_tokens
# Flatten list
replace_tokens = [item for sublist in replace_tokens for item in sublist]
replace_tokens[-1] = self.image_end_token
replace_str = "".join(replace_tokens)
                    replace_strings.append(replace_str)
                    sample = sample.replace(self.image_token, "<placeholder>", 1)
while "<placeholder>" in sample:
replace_str = replace_strings.pop(0)
sample = sample.replace("<placeholder>", replace_str, 1)
prompt_strings.append(sample)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
        return BatchMixFeature(data={**text_inputs, **image_inputs})
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
        return self.tokenizer.batch_decode(*args, **kwargs)
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
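# Pure-Python sketch (not from the original file) of the image-token expansion in
# __call__ above, assuming patch_size=16 and a 64x48 resized image: each row of
# patches becomes [IMG] * num_width_tokens plus [IMG_BREAK], and the very last
# break is replaced by [IMG_END].
patch_size = 16
height, width = 64, 48
num_height_tokens, num_width_tokens = height // patch_size, width // patch_size  # 4, 3
replace_tokens = [["[IMG]"] * num_width_tokens + ["[IMG_BREAK]"]] * num_height_tokens
replace_tokens = [token for row in replace_tokens for token in row]  # flatten rows
replace_tokens[-1] = "[IMG_END]"
print("".join(replace_tokens))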
# From src/transformers/models/pixtral/modeling_pixtral.py
class PixtralRotaryEmbedding(nn.Module):
"""
The key with pixtral embedding is just that you have a frequency for each pixel positions.
If you have height x width pixels (or embedding pixels), then the frequency used for ROPE
is given by indexing the pre_computed frequency on the width and height.
What you output is of dimension (batch, height * width, dim) with dim the embed dim.
This simply means that for each image hidden state, you are going to add
a corresponding positional embedding, based on its index in the grid.
"""
def __init__(self, config, device):
super().__init__()
self.rope_type = "default"
self.dim = config.head_dim
self.base = config.rope_theta
max_patches_per_side = config.image_size // config.patch_size
        freqs = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float() / self.dim))
h = torch.arange(max_patches_per_side, device=freqs.device)
w = torch.arange(max_patches_per_side, device=freqs.device)
freqs_h = torch.outer(h, freqs[::2]).float()
freqs_w = torch.outer(w, freqs[1::2]).float()
inv_freq = torch.cat(
[
freqs_h[:, None, :].repeat(1, max_patches_per_side, 1),
freqs_w[None, :, :].repeat(max_patches_per_side, 1, 1),
],
dim=-1,
).reshape(-1, self.dim // 2)  # reshape so lookups use a single flat position index rather than a (row, col) pair
# The layout differs from the paper, but this permutation yields the same calculation
# TODO: maybe make it torch-compatible later on; we can also just slice
self.register_buffer("inv_freq", torch.cat((inv_freq, inv_freq), dim=-1), persistent=False) | 2,933 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
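As a sanity check on the construction above, the following sketch rebuilds the same inv_freq table for toy stand-ins for head_dim, rope_theta, and image_size // patch_size, and shows how a patch at grid position (row, col) maps to a flat position id before the cos/sin lookup (all numbers are illustrative):

import torch

dim, base, max_side = 8, 10000.0, 4  # toy stand-ins for the config values

freqs = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
freqs_h = torch.outer(torch.arange(max_side), freqs[::2])   # even slots: row
freqs_w = torch.outer(torch.arange(max_side), freqs[1::2])  # odd slots: column
inv_freq = torch.cat(
    [
        freqs_h[:, None, :].repeat(1, max_side, 1),
        freqs_w[None, :, :].repeat(max_side, 1, 1),
    ],
    dim=-1,
).reshape(-1, dim // 2)  # row-major: (row, col) -> row * max_side + col

row, col = 2, 3
position_id = row * max_side + col
cos = torch.cat((inv_freq[position_id], inv_freq[position_id])).cos()
print(cos.shape)  # torch.Size([8]) == head_dim, as consumed by the attention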
@torch.no_grad()
def forward(self, x, position_ids):
if "dynamic" in self.rope_type:
self._dynamic_frequency_update(position_ids, device=x.device)
# Core RoPE block
freqs = self.inv_freq[position_ids]
# position_ids_expanded = position_ids[:, None, :].float()
# Force float32 (see https://github.com/huggingface/transformers/pull/29285)
device_type = x.device.type
device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False):
emb = freqs
cos = emb.cos()
sin = emb.sin()
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) | 2,933 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
def _dynamic_frequency_update(self, position_ids, device):
"""
dynamic RoPE layers should recompute `inv_freq` in the following situations:
1 - growing beyond the cached sequence length (allow scaling)
2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
"""
seq_len = torch.max(position_ids) + 1
if seq_len > self.max_seq_len_cached: # growth
inv_freq, self.attention_scaling = self.rope_init_fn(
self.config, device, seq_len=seq_len, **self.rope_kwargs
)
self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
self.max_seq_len_cached = seq_len | 2,933 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
self.max_seq_len_cached = self.original_max_seq_len | 2,933 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
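With rope_type = "default", the dynamic branch above is dead code for Pixtral; it is carried over for parity with the language-model rotary embeddings, where inv_freq must grow past the cached length and be restored once sequences fall back inside the original window. A behavioral sketch of that grow/reset rule (recompute_inv_freq stands in for the model's rope_init_fn; this is not the actual class):

class DynamicFreqCache:
    def __init__(self, original_max_len, original_inv_freq, recompute_inv_freq):
        self.original_max_seq_len = original_max_len
        self.max_seq_len_cached = original_max_len
        self.original_inv_freq = original_inv_freq
        self.inv_freq = original_inv_freq
        self.recompute_inv_freq = recompute_inv_freq

    def update(self, seq_len):
        if seq_len > self.max_seq_len_cached:  # growth: rescale for longer inputs
            self.inv_freq = self.recompute_inv_freq(seq_len)
            self.max_seq_len_cached = seq_len
        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:
            # reset: back inside the original window, restore the exact frequencies
            self.inv_freq = self.original_inv_freq
            self.max_seq_len_cached = self.original_max_seq_len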
class PixtralAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
self.o_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) | 2,934 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, patches, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, unsqueeze_dim=0) | 2,934 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(batch_size, patches, -1)
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights | 2,934 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
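The eager attention math above (scaled QK^T, additive mask, fp32 softmax, dropout, weighted sum over V) matches what torch.nn.functional.scaled_dot_product_attention computes, which gives a quick way to sanity-check the implementation (the shapes below are arbitrary toy values, with dropout disabled for the comparison):

import torch
import torch.nn.functional as F

batch, heads, patches, head_dim = 2, 4, 9, 16
q = torch.randn(batch, heads, patches, head_dim)
k = torch.randn(batch, heads, patches, head_dim)
v = torch.randn(batch, heads, patches, head_dim)
mask = torch.zeros(batch, 1, patches, patches)  # additive mask, 0 = attend

# Eager path, as in PixtralAttention.forward.
scale = head_dim ** -0.5
weights = torch.matmul(q, k.transpose(2, 3)) * scale + mask
weights = F.softmax(weights, dim=-1, dtype=torch.float32).to(q.dtype)
eager_out = torch.matmul(weights, v)

# Fused path; the default scale is also 1/sqrt(head_dim).
sdpa_out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)

print(torch.allclose(eager_out, sdpa_out, atol=1e-5))  # True (up to numerics)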
class PixtralMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj | 2,935 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
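PixtralMLP is the gated (SwiGLU-style) feed-forward used throughout the Llama family: the up projection is modulated elementwise by an activated gate before projecting back down. A minimal functional equivalent, assuming a SiLU gate (the actual activation is whatever config.hidden_act selects):

import torch
import torch.nn.functional as F

def gated_mlp(x, w_gate, w_up, w_down):
    # down( silu(gate(x)) * up(x) ) -- same dataflow as PixtralMLP.forward
    return (F.silu(x @ w_gate.T) * (x @ w_up.T)) @ w_down.T

hidden, inter = 8, 32
x = torch.randn(2, 5, hidden)
w_gate = torch.randn(inter, hidden)
w_up = torch.randn(inter, hidden)
w_down = torch.randn(hidden, inter)
print(gated_mlp(x, w_gate, w_up, w_down).shape)  # torch.Size([2, 5, 8])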
class PixtralRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
PixtralRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" | 2,936 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
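Unlike LayerNorm, RMSNorm skips the mean subtraction and the bias: it only rescales each feature vector by its root mean square. The equivalence is easy to verify for zero-mean inputs (a quick check, not part of the model):

import torch

hidden = 16
x = torch.randn(2, 5, hidden)
x = x - x.mean(-1, keepdim=True)  # force zero mean per feature vector

eps = 1e-6
rms_normed = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)  # weight == 1

ln = torch.nn.LayerNorm(hidden, eps=eps, elementwise_affine=False)
print(torch.allclose(rms_normed, ln(x), atol=1e-5))  # True for zero-mean x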
class PixtralAttentionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention_norm = PixtralRMSNorm(config.hidden_size, eps=1e-5)
self.feed_forward = PixtralMLP(config)
self.attention = PixtralAttention(config)
self.ffn_norm = PixtralRMSNorm(config.hidden_size, eps=1e-5) | 2,937 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states | 2,937 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
hidden_states = self.attention_norm(hidden_states)
hidden_states, attn_weights = self.attention(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ffn_norm(hidden_states)
hidden_states = self.feed_forward(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs | 2,937 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
class PixtralTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layers = torch.nn.ModuleList()
for _ in range(config.num_hidden_layers):
self.layers.append(PixtralAttentionLayer(config))
self.gradient_checkpointing = False | 2,938 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Embeddings which serve as input to the Transformer.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**. | 2,938 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | 2,938 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
encoder_layer.__call__,
hidden_states,
attention_mask,
position_embeddings,
output_attentions,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0] | 2,938 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
) | 2,938 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
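A detail of the collection loop that is easy to miss: each entry appended to encoder_states is a layer's input, and the final output is appended after the loop, so requesting hidden states yields num_hidden_layers + 1 tensors (the embeddings plus one per layer). A stripped-down sketch of the same bookkeeping:

import torch

num_layers, batch, seq, dim = 3, 1, 4, 8
layers = [torch.nn.Linear(dim, dim) for _ in range(num_layers)]

hidden_states = torch.randn(batch, seq, dim)
encoder_states = ()
for layer in layers:
    encoder_states = encoder_states + (hidden_states,)  # layer input
    hidden_states = layer(hidden_states)
encoder_states = encoder_states + (hidden_states,)      # final output

print(len(encoder_states))  # num_layers + 1 == 4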
class PixtralPreTrainedModel(PreTrainedModel):
config_class = PixtralVisionConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["PixtralAttention"]
_skip_keys_device_placement = "past_key_values"
_supports_cache_class = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_() | 2,939 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
class PixtralVisionModel(PixtralPreTrainedModel):
base_model_prefix = "vision_encoder"
def __init__(self, config):
super().__init__(config)
self.config = config
self.patch_conv = nn.Conv2d(
in_channels=config.num_channels,
out_channels=config.hidden_size,
kernel_size=config.patch_size,
stride=config.patch_size,
bias=False,
)
self.ln_pre = PixtralRMSNorm(config.hidden_size, eps=1e-5)
self.transformer = PixtralTransformer(config)
self.patch_positional_embedding = PixtralRotaryEmbedding(config, device=self.device) | 2,940 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
@add_start_docstrings_to_model_forward(PIXTRAL_INPUTS_DOCSTRING)
def forward(
self,
pixel_values: List[torch.Tensor],
output_hidden_states: Optional[bool] = False,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
*args,
**kwargs,
) -> Union[Tuple, BaseModelOutput]:
"""
Returns:
pixel_values: tensor of token features for
all tokens of all images of shape (N_toks, D)
"""
# pass images through initial convolution independently
if len(pixel_values) > 1:
raise ValueError("Batching/padding not supported yet!")
patch_embeds_list = [self.patch_conv(img.to(self.dtype)) for sample in pixel_values for img in sample] | 2,940 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pixtral/modeling_pixtral.py |
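For completeness, a hedged end-to-end sketch of running the vision tower against the forward signature shown above. The config values are deliberately tiny and made up; real checkpoints are much larger, and image sides must be multiples of patch_size:

import torch
from transformers import PixtralVisionConfig, PixtralVisionModel

config = PixtralVisionConfig(
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    image_size=64,
    patch_size=16,
)
model = PixtralVisionModel(config).eval()

# One sample containing one 64x48 image (both sides multiples of 16).
pixel_values = [torch.randn(1, 3, 64, 48)]
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # expected: (64//16) * (48//16) = 12 patch tokens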