# coding=utf-8
# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor class for KOSMOS-2."""
import copy
import math
import re
from typing import Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput, is_batched
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from ...tokenization_utils import AddedToken
from ...tokenization_utils_base import BatchEncoding, TextInput
BboxInput = Union[
list[tuple[int, int]],
list[tuple[float, float, float, float]],
list[list[tuple[int, int]]],
list[list[tuple[float, float, float, float]]],
]
class Kosmos2ImagesKwargs(ImagesKwargs, total=False):
bboxes: Optional[list[float]]
num_image_tokens: Optional[int]
first_image_token_id: Optional[int]
class Kosmos2TextKwargs(TextKwargs, total=False):
add_eos_token: Optional[bool]
class Kosmos2ProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: Kosmos2TextKwargs
images_kwargs: Kosmos2ImagesKwargs
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_token_type_ids": False,
"verbose": True,
"add_eos_token": False,
},
"images_kwargs": {
"num_image_tokens": 64,
},
}
class Kosmos2Processor(ProcessorMixin):
r"""
Constructs a KOSMOS-2 processor which wraps a KOSMOS-2 image processor and a KOSMOS-2 tokenizer into a single
processor.
[`Kosmos2Processor`] offers all the functionalities of [`CLIPImageProcessor`] and some functionalities of
[`XLMRobertaTokenizerFast`]. See the docstring of [`~Kosmos2Processor.__call__`] and [`~Kosmos2Processor.decode`]
for more information.
Args:
image_processor (`CLIPImageProcessor`):
An instance of [`CLIPImageProcessor`]. The image processor is a required input.
tokenizer (`XLMRobertaTokenizerFast`):
An instance of [`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
num_patch_index_tokens (`int`, *optional*, defaults to 1024):
The number of tokens that represent patch indices.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = ("CLIPImageProcessor", "CLIPImageProcessorFast")
tokenizer_class = "AutoTokenizer"
def __init__(self, image_processor, tokenizer, num_patch_index_tokens=1024, *kwargs):
tokenizer.return_token_type_ids = False
self.eod_token = "</doc>"
self.boi_token = "<image>"
self.eoi_token = "</image>"
self.eoc_token = "</chunk>"
self.eol_token = "</line>"
self.bop_token = "<phrase>"
self.eop_token = "</phrase>"
self.boo_token = "<object>"
self.eoo_token = "</object>"
self.dom_token = "</delimiter_of_multi_objects/>"
self.grd_token = "<grounding>"
self.tag_tokens = [
self.eod_token,
self.boi_token,
self.eoi_token,
self.eoc_token,
self.eol_token,
self.bop_token,
self.eop_token,
self.boo_token,
self.eoo_token,
self.dom_token,
self.grd_token,
]
self.num_patch_index_tokens = num_patch_index_tokens
patch_index_tokens = [f"<patch_index_{str(x).zfill(4)}>" for x in range(self.num_patch_index_tokens)]
tokens_to_add = []
for token in self.tag_tokens + patch_index_tokens:
tokens_to_add.append(AddedToken(token, lstrip=True, rstrip=False, normalized=False))
tokenizer.add_tokens(tokens_to_add)
super().__init__(image_processor, tokenizer)
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, list[TextInput]] = None,
audio=None,
videos=None,
**kwargs: Unpack[Kosmos2ProcessorKwargs],
) -> BatchFeature:
"""
This method uses [`CLIPImageProcessor.__call__`] method to prepare image(s) for the model, and
[`XLMRobertaTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
The rest of this documentation shows the arguments specific to `Kosmos2Processor`.
Args:
bboxes (`Union[list[tuple[int]], list[tuple[float]], list[list[tuple[int]]], list[list[tuple[float]]]]`, *optional*):
The bounding boxes associated with `texts`.
num_image_tokens (`int`, *optional*, defaults to 64):
The number of (consecutive) places that are used to mark the placeholders to store image information.
This should be the same as `latent_query_num` in the instance of `Kosmos2Config` you are using.
first_image_token_id (`int`, *optional*):
The token id that will be used for the first place of the subsequence that is reserved to store image
information. If unset, will default to `self.tokenizer.unk_token_id + 1`.
add_eos_token (`bool`, *optional*, defaults to `False`):
Whether or not to include `EOS` token id in the encoding when `add_special_tokens=True`.
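Example (a minimal usage sketch; the checkpoint name and bounding box values below are illustrative):

```python
>>> from PIL import Image
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
>>> image = Image.new("RGB", (224, 224))
>>> text = "<grounding><phrase> a snowman</phrase>"
>>> inputs = processor(images=image, text=text, bboxes=[(0.39, 0.05, 0.98, 0.83)], return_tensors="pt")
>>> list(inputs.keys())  # doctest: +SKIP
['pixel_values', 'input_ids', 'attention_mask', 'image_embeds_position_mask']
```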
"""
if images is None and text is None:
raise ValueError("You have to specify either images or text.")
output_kwargs = self._merge_kwargs(
Kosmos2ProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
bboxes = output_kwargs["images_kwargs"].pop("bboxes", None)
num_image_tokens = output_kwargs["images_kwargs"].pop("num_image_tokens", 64)
first_image_token_id = output_kwargs["images_kwargs"].pop("first_image_token_id", None)
add_eos_token = output_kwargs["text_kwargs"].pop("add_eos_token", False)
add_special_tokens = output_kwargs["text_kwargs"]["add_special_tokens"]
padding = output_kwargs["text_kwargs"]["padding"]
return_tensors = output_kwargs["text_kwargs"].setdefault("return_tensors", None)
encoding = BatchFeature()
if images is not None:
image_encoding = self.image_processor(images, **output_kwargs["images_kwargs"])
encoding.update(image_encoding)
if text is not None:
text = self.preprocess_examples(text, images, bboxes, num_image_tokens=num_image_tokens)
if add_special_tokens and not add_eos_token:
if isinstance(text, str):
text = f"{self.tokenizer.bos_token}{text}"
elif isinstance(text, list):
text = [f"{self.tokenizer.bos_token}{s}" for s in text]
output_kwargs["text_kwargs"]["add_special_tokens"] = (
output_kwargs["text_kwargs"]["add_special_tokens"] and add_eos_token
)
output_kwargs["text_kwargs"]["padding"] = padding if images is None else False
output_kwargs["text_kwargs"]["return_tensors"] = return_tensors if images is None else None
text_encoding = self.tokenizer(text=text, **output_kwargs["text_kwargs"])
encoding.update(text_encoding)
output_kwargs["text_kwargs"]["add_special_tokens"] = add_special_tokens
output_kwargs["text_kwargs"]["padding"] = padding
output_kwargs["text_kwargs"]["return_tensors"] = return_tensors
if text is not None and images is not None:
# Use the id of the first token after <unk>
if first_image_token_id is None:
first_image_token_id = self.tokenizer.unk_token_id + 1
# To see if we need one more `0` (for `<s>`) at the beginning of `image_embeds_position_mask`.
with_bos = add_special_tokens
# The first (actual) `<image>` token is always at the 1st or 2nd place (after `<s>` if any). Here we look
# for the second `<image>` token (which indicates the first image token).
start_index = int(with_bos) + 1
# Add `image_embeds_position_mask`: the leading and trailing `0` are for `boi` and `eoi` tokens. The `1` indicates
# the places of image tokens.
image_token_ids = list(range(first_image_token_id, first_image_token_id + num_image_tokens))
base_image_embeds_position_mask = [0] + [1] * num_image_tokens + [0]
# loop over `encoding["input_ids"]`
input_ids = []
image_embeds_position_mask = []
all_input_ids = encoding["input_ids"]
# not batched -> (changed to) batch of size 1
if isinstance(text, str):
all_input_ids = [all_input_ids]
encoding["attention_mask"] = [encoding["attention_mask"]]
for text_ids in all_input_ids:
# change the ids for the fake `<image>` tokens in `input_ids`
text_ids = text_ids[:start_index] + image_token_ids + text_ids[start_index + num_image_tokens :]
input_ids.append(text_ids)
mask = copy.copy(base_image_embeds_position_mask)
if with_bos:
# for `<s>`
mask = [0] + mask
# trailing part (which are not related to the image)
mask += [0] * (len(text_ids) - len(mask))
image_embeds_position_mask.append(mask)
if isinstance(text, list):
sorted_length = sorted(
[(idx, len(x)) for idx, x in enumerate(text_encoding.input_ids)], key=lambda x: x[-1]
)
_, min_len_not_padded = sorted_length[0]
idx, _ = sorted_length[-1]
output_kwargs["text_kwargs"]["add_special_tokens"] = (
output_kwargs["text_kwargs"]["add_special_tokens"] and add_eos_token
)
output_kwargs["text_kwargs"]["return_tensors"] = None
text_encoding = self.tokenizer(text=[text[idx]], **output_kwargs["text_kwargs"])
max_len_padded = len(text_encoding.input_ids[0])
if min_len_not_padded != max_len_padded:
if self.tokenizer.padding_side == "right":
input_ids = [x + [self.tokenizer.pad_token_id] * (max_len_padded - len(x)) for x in input_ids]
image_embeds_position_mask = [
x + [0] * (max_len_padded - len(x)) for x in image_embeds_position_mask
]
encoding["attention_mask"] = [
x + [0] * (max_len_padded - len(x)) for x in encoding["attention_mask"]
]
elif self.tokenizer.padding_side == "left":
input_ids = [[self.tokenizer.pad_token_id] * (max_len_padded - len(x)) + x for x in input_ids]
image_embeds_position_mask = [
[0] * (max_len_padded - len(x)) + x for x in image_embeds_position_mask
]
encoding["attention_mask"] = [
[0] * (max_len_padded - len(x)) + x for x in encoding["attention_mask"]
]
# un-batch if necessary
if isinstance(text, str) and return_tensors is None:
input_ids = input_ids[0]
encoding["attention_mask"] = encoding["attention_mask"][0]
image_embeds_position_mask = image_embeds_position_mask[0]
# update (with the target tensor type if specified)
encoding.update(
BatchEncoding(
data={
"input_ids": input_ids,
"attention_mask": encoding["attention_mask"],
"image_embeds_position_mask": image_embeds_position_mask,
},
tensor_type=return_tensors,
)
)
return encoding
def _check_bboxes_for_single_text(self, bboxes):
"""
Check `bboxes` for a single text example. It could be
- `None`: no bounding box associated to a text.
- A list with each element being the bounding boxes associated to one `<phrase> ... </phrase>` pair found
in a text. This could be:
- `None`: no bounding box associated to a `<phrase> ... </phrase>` pair.
- A tuple of 2 integers: A single bounding box specified by patch indices.
- A tuple of 4 floating point numbers: A single bounding box specified by (normalized) coordinates.
- A list containing the above 2 tuple types: Multiple bounding boxes for a
`<phrase> ... </phrase>` pair.
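Example of valid `bboxes` values for a single text example containing two `<phrase> ... </phrase>` pairs (the
numbers are illustrative):

```python
bboxes = [(44, 863), None]  # patch indices for the 1st phrase, no box for the 2nd
bboxes = [(0.39, 0.05, 0.98, 0.83), (44, 863)]  # normalized coordinates / patch indices
bboxes = [[(44, 863), (5, 911)], (44, 863)]  # multiple boxes for the 1st phrase
```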
"""
if bboxes is None:
return
elif not isinstance(bboxes, list):
raise ValueError("`bboxes` (for a single text example) should be `None` or a list.")
# `bbox` is the bounding boxes for a single <phrase> </phrase> pair
for bbox in bboxes:
if bbox is None:
continue
elif not isinstance(bbox, list):
bbox = [bbox]
for element in bbox:
if not isinstance(element, tuple) or not (
(len(element) == 2 and all(isinstance(x, int) for x in element))
or (len(element) == 4 and all(isinstance(x, float) for x in element))
):
raise ValueError(
"Each element in `bboxes` (for a single text example) should be either `None`, a tuple containing "
"2 integers or 4 float point numbers, or a list containing such tuples. Also "
"make sure the arguments `texts` and `bboxes` passed to `preprocess_text` are both in "
"batches or both for a single example."
)
def _preprocess_single_example(self, text, image, bboxes, img_info_tokens):
text = text.strip()
if image is not None:
# Add `<image> ... (fake) image tokens ... </image>`
text = f"{img_info_tokens} {text}"
# Add `<object> <patch_idx_xxxx> <patch_idx_yyy> </object>` after `<phrase> phrase text </phrase>`
text = self._insert_patch_index_tokens(text, bboxes)
return text
def preprocess_examples(
self,
texts: Union[TextInput, list[TextInput]],
images: ImageInput = None,
bboxes: BboxInput = None,
num_image_tokens: Optional[int] = 64,
) -> Union[str, list[str]]:
"""Add image and bounding box information to `texts` as image and patch index tokens.
Args:
texts (`Union[TextInput, list[TextInput]]`): The texts to be processed.
images (`ImageInput`, *optional*): The images associated to `texts`.
bboxes (`Union[list[tuple[int]], list[tuple[float]], list[list[tuple[int]]], list[list[tuple[float]]]]`, *optional*):
The bounding boxes associated with `texts`.
num_image_tokens (`int`, *optional*, defaults to 64):
The number of image tokens (used as latent queries). This should correspond to the `latent_query_num`
attribute in `Kosmos2Config`.
Returns:
`Union[TextInput, list[TextInput]]`: The processed texts with image and patch index tokens.
"""
# These are fake `<image>` tokens enclosed between (the actual) `<image>` token and `</image>`.
img_tokens = [self.boi_token] * num_image_tokens
img_info_tokens = " ".join([self.boi_token] + img_tokens + [self.eoi_token])
# make batch to simplify processing logic
batched = True
if isinstance(texts, str):
batched = False
texts = [texts]
if images is None:
images = [None] * len(texts)
elif not is_batched(images):
images = [images]
if len(texts) != len(images):
raise ValueError(
f"The number of examples in `texts` and `images` should be the same. Got {len(texts)} v.s. {len(images)} instead."
)
if not batched:
self._check_bboxes_for_single_text(bboxes)
bboxes = [bboxes]
elif bboxes is not None:
if not isinstance(bboxes, list):
raise ValueError("`bboxes` should be `None` or a list (as a batch) when `texts` is passed as a batch.")
for x in bboxes:
self._check_bboxes_for_single_text(x)
else:
bboxes = [None] * len(texts)
if len(bboxes) != len(texts):
raise ValueError(
f"The number of examples in `texts` and `bboxes` should be the same. Got {len(texts)} v.s. {len(bboxes)} instead."
)
result = [
self._preprocess_single_example(text, image, bbox, img_info_tokens)
for text, image, bbox in zip(texts, images, bboxes)
]
# un-batch if necessary
if not batched:
result = result[0]
return result
def post_process_generation(self, text, cleanup_and_extract=True):
caption = text.split(self.eoi_token)[-1]
if cleanup_and_extract:
return clean_text_and_extract_entities_with_bboxes(caption)
return caption
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
"""
generated_texts = self.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs)
return [self.post_process_generation(text, cleanup_and_extract=False) for text in generated_texts]
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return tokenizer_input_names + image_processor_input_names + ["image_embeds_position_mask"]
def _insert_patch_index_tokens(self, text: str, bboxes: Union[list[tuple[int]], list[tuple[float]]]) -> str:
if bboxes is None or len(bboxes) == 0:
return text
matched_phrases = list(re.finditer(r"<phrase>.+?</phrase>", string=text))
if len(matched_phrases) != len(bboxes):
raise ValueError(
f"The number of elements in `bboxes` should be the same as the number of `<phrase> ... </phrase>` pairs in `text`. Got {len(matched_phrases)} v.s. {len(bboxes)} instead."
)
# insert object's patch index tokens
# the found `<phrase> ... </phrase>` pairs.
curr_pos = 0
buffer = []
for matched, bbox in zip(matched_phrases, bboxes):
_, end = matched.span()
buffer.append(text[curr_pos:end])
curr_pos = end
# A phrase without bbox
if bbox is None:
continue
# A phrase with a single bbox
if isinstance(bbox, tuple):
bbox = [bbox]
patch_index_strings = []
# A phrase could have multiple bboxes
if not all(box is not None for box in bbox):
raise ValueError(
"The multiple bounding boxes for a single phrase should not contain any `None` value."
)
for box in bbox:
patch_index_1, patch_index_2 = self._convert_bbox_to_patch_index_tokens(box)
patch_index_strings.append(f"{patch_index_1} {patch_index_2}")
# `bbox` being an empty list
if len(patch_index_strings) == 0:
continue
position_str = " </delimiter_of_multi_objects/> ".join(patch_index_strings)
buffer.append(f"<object> {position_str} </object>")
# remaining
if curr_pos < len(text):
buffer.append(text[curr_pos:])
text = "".join(buffer)
return text
def _convert_bbox_to_patch_index_tokens(
self, bbox: Union[tuple[int, int], tuple[float, float, float, float]]
) -> tuple[str, str]:
# already computed patch indices
if len(bbox) == 2:
idx_1, idx_2 = bbox
# bbox specified with (normalized) coordinates
else:
# use `self.tokenizer` to get `num_patches_per_side`
num_patches_per_side = int(math.sqrt(self.num_patch_index_tokens))
idx_1, idx_2 = coordinate_to_patch_index(bbox, num_patches_per_side)
token_1 = f"<patch_index_{str(idx_1).zfill(4)}>"
token_2 = f"<patch_index_{str(idx_2).zfill(4)}>"
return token_1, token_2
def coordinate_to_patch_index(bbox: tuple[float, float, float, float], num_patches_per_side: int) -> tuple[int, int]:
"""Convert a bounding box to a pair of patch indices.
Args:
bbox (`tuple[float, float, float, float]`):
The 4 coordinates of the bounding box, with the format being (x1, y1, x2, y2) specifying the upper-left and
lower-right corners of the box. It should have x2 > x1 and y2 > y1.
num_patches_per_side (`int`): the number of patches along each side.
Returns:
`tuple[int, int]`: A pair of patch indices representing the upper-left patch and lower-right patch.
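Example (a round-trip sketch using the same "snowman" box as the other examples in this file;
`patch_index_to_coordinate` defined below is the inverse conversion):

```python
>>> coordinate_to_patch_index((0.390625, 0.046875, 0.984375, 0.828125), num_patches_per_side=32)
(44, 863)
>>> patch_index_to_coordinate(44, 863, num_patches_per_side=32)
(0.390625, 0.046875, 0.984375, 0.828125)
```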
"""
(x1, y1, x2, y2) = bbox
if not (x2 > x1 and y2 > y1):
raise ValueError("The coordinates in `bbox` should be `(x1, y1, x2, y2)` with `x2 > x1` and `y2 > y1`.")
ul_x = math.floor(x1 * num_patches_per_side)
ul_y = math.floor(y1 * num_patches_per_side)
lr_x = math.ceil(x2 * num_patches_per_side - 1)
lr_y = math.ceil(y2 * num_patches_per_side - 1)
ul_idx = ul_y * num_patches_per_side + ul_x
lr_idx = lr_y * num_patches_per_side + lr_x
return ul_idx, lr_idx
# copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L35C1-L75C38
# (with format modifications)
def patch_index_to_coordinate(ul_idx: int, lr_idx: int, num_patches_per_side: int):
"""
Given a grid of length `num_patches_per_side` and the indices of the upper-left and lower-right corners of a
bounding box, returns the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).
Args:
ul_idx (`int`): the index of the grid cell that corresponds to the upper-left corner of the bounding box.
lr_idx (`int`): the index of the grid cell that corresponds to the lower-right corner of the bounding box.
num_patches_per_side (`int`): the number of patches along each side.
Returns:
`tuple[float]`: the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).
"""
"""
# Compute the size of each cell in the grid
cell_size = 1.0 / num_patches_per_side
# Compute the x and y indices of the upper-left and lower-right corners of the bounding box
ul_x = ul_idx % num_patches_per_side
ul_y = ul_idx // num_patches_per_side
lr_x = lr_idx % num_patches_per_side
lr_y = lr_idx // num_patches_per_side
# Compute the normalized coordinates of the bounding box
if ul_idx == lr_idx:
x1 = ul_x * cell_size
y1 = ul_y * cell_size
x2 = lr_x * cell_size + cell_size
y2 = lr_y * cell_size + cell_size
elif ul_x == lr_x or ul_y == lr_y:
x1 = ul_x * cell_size
y1 = ul_y * cell_size
x2 = lr_x * cell_size + cell_size
y2 = lr_y * cell_size + cell_size
else:
x1 = ul_x * cell_size + cell_size / 2
y1 = ul_y * cell_size + cell_size / 2
x2 = lr_x * cell_size + cell_size / 2
y2 = lr_y * cell_size + cell_size / 2
return x1, y1, x2, y2
# copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L4-L33
# (with format modifications)
def extract_entities_with_patch_indices(text):
"""Extract entities contained in `text`. The bounding bboxes is given in the form of patch indices.
This functioin is only intended to be used within `clean_text_and_extract_entities_with_bboxes` where further
processing happens, including converting to normalized coordinates and whitespace character cleaning up.
Examples:
```python
>>> text = "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
>>> entities = extract_entities_with_patch_indices(text)
>>> entities
[(' a snowman', (31, 41), [(44, 863)]), (' a fire', (130, 137), [(5, 911)])]
```"""
# The regular expression pattern for matching the required formats
pattern = r"(?:(<phrase>([^<]+)</phrase>))?<object>((?:<patch_index_\d+><patch_index_\d+></delimiter_of_multi_objects/>)*<patch_index_\d+><patch_index_\d+>)</object>"
# Find all matches in the given string
matches = re.finditer(pattern, text)
# Initialize an empty list to store the valid patch_index combinations
entities_with_patch_indices = []
for match in matches:
# span of a `phrase` that is between <phrase> and </phrase>
span = match.span(2)
phrase_tag, phrase, match_content = match.groups()
if not phrase_tag:
phrase = None
# We take the starting position of `<object>`
span = (match.span(0)[0], match.span(0)[0])
# Split the match_content by the delimiter to get individual patch_index pairs
patch_index_pairs = match_content.split("</delimiter_of_multi_objects/>")
entity_bboxes = []
for pair in patch_index_pairs:
# Extract the xxxx and yyyy values from the patch_index pair
x = re.search(r"<patch_index_(\d+)>", pair)
y = re.search(r"<patch_index_(\d+)>", pair[1:])
if x and y:
entity_bboxes.append((int(x.group(1)), int(y.group(1))))
if phrase:
entities_with_patch_indices.append((phrase, span, entity_bboxes))
else:
for bbox in entity_bboxes:
# fake entity name
entity = f"<patch_index_{bbox[0]}><patch_index_{bbox[1]}>"
entities_with_patch_indices.append((entity, span, [bbox]))
return entities_with_patch_indices
def adjust_entity_positions(entity, text):
"""Adjust the positions of the entities in `text` to be relative to the text with special fields removed."""
entity_name, (start, end) = entity
# compute the length of strings with special fields (tag tokens, patch index tokens, etc.) removed
adjusted_start = len(re.sub("<.*?>", "", text[:start]))
adjusted_end = len(re.sub("<.*?>", "", text[:end]))
adjusted_entity = (entity_name, (adjusted_start, adjusted_end))
return adjusted_entity
def _cleanup_spaces(text, entities):
"""Remove the spaces around the text and the entities in it."""
new_text = text.strip()
leading_spaces = len(text) - len(text.lstrip())
new_entities = []
for entity_name, (start, end), bboxes in entities:
entity_name_leading_spaces = len(entity_name) - len(entity_name.lstrip())
entity_name_trailing_spaces = len(entity_name) - len(entity_name.rstrip())
start = start - leading_spaces + entity_name_leading_spaces
end = end - leading_spaces - entity_name_trailing_spaces
entity_name = entity_name.strip()
new_entities.append((entity_name, (start, end), bboxes))
return new_text, new_entities
# copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L77-L87
# (with format modifications)
def clean_text_and_extract_entities_with_bboxes(text, num_patches_per_side=32):
"""Remove the tag tokens from `text`, extract entities in it with some cleaning up of white characters.
Examples:
```python
>>> text = "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
>>> clean_text, entities = clean_text_and_extract_entities_with_bboxes(text)
>>> clean_text
'An image of a snowman warming himself by a fire.'
>>> entities
[('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])]
```"""
# remove special fields (tag tokens, patch index tokens, etc.)
processed_text = re.sub("<.*?>", "", text)
entities_with_patch_indices = extract_entities_with_patch_indices(text)
entities = []
for item in entities_with_patch_indices:
entity, bboxes = item[0:2], item[2]
adjusted_entity = adjust_entity_positions(entity, text)
bboxes_in_coords = [patch_index_to_coordinate(bbox[0], bbox[1], num_patches_per_side) for bbox in bboxes]
entities.append(adjusted_entity + (bboxes_in_coords,))
return _cleanup_spaces(processed_text, entities)
__all__ = ["Kosmos2Processor"]
| transformers/src/transformers/models/kosmos2/processing_kosmos2.py/0 | {
"file_path": "transformers/src/transformers/models/kosmos2/processing_kosmos2.py",
"repo_id": "transformers",
"token_count": 13844
} | 503 |
# coding=utf-8
# Copyright 2010, The Microsoft Research Asia LayoutLM Team authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LayoutLM model configuration"""
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from typing import Any, Optional
from ... import PretrainedConfig, PreTrainedTokenizer
from ...onnx import OnnxConfig, PatchingSpec
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
class LayoutLMConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a
LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the LayoutLM
[microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the LayoutLM model. Defines the number of different tokens that can be represented by the
*inputs_ids* passed to the forward method of [`LayoutLMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed into [`LayoutLMModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
The value used to pad input_ids.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the 2D position embedding might ever be used with. Typically set this to something large
just in case (e.g., 1024).
Examples:
```python
>>> from transformers import LayoutLMConfig, LayoutLMModel
>>> # Initializing a LayoutLM configuration
>>> configuration = LayoutLMConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = LayoutLMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "layoutlm"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
position_embedding_type="absolute",
use_cache=True,
max_2d_position_embeddings=1024,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self._position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.max_2d_position_embeddings = max_2d_position_embeddings
@property
def position_embedding_type(self):
warnings.warn(
"The `position_embedding_type` attribute is deprecated and will be removed in v4.55.",
FutureWarning,
)
return self._position_embedding_type
@position_embedding_type.setter
def position_embedding_type(self, value):
self._position_embedding_type = value
class LayoutLMOnnxConfig(OnnxConfig):
def __init__(
self,
config: PretrainedConfig,
task: str = "default",
patching_specs: Optional[list[PatchingSpec]] = None,
):
super().__init__(config, task=task, patching_specs=patching_specs)
self.max_2d_positions = config.max_2d_position_embeddings - 1
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("token_type_ids", {0: "batch", 1: "sequence"}),
]
)
def generate_dummy_inputs(
self,
tokenizer: PreTrainedTokenizer,
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
"""
Generate inputs to provide to the ONNX exporter for the specific framework
Args:
tokenizer: The tokenizer associated with this model configuration
batch_size: The batch size (int) to export the model for (-1 means dynamic axis)
seq_length: The sequence length (int) to export the model for (-1 means dynamic axis)
is_pair: Indicate if the input is a pair (sentence 1, sentence 2)
framework: The framework (optional) the tokenizer will generate tensor for
Returns:
Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
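Example (illustrative sketch; assumes PyTorch is installed and the `microsoft/layoutlm-base-uncased` tokenizer is available):

```python
>>> from transformers import AutoTokenizer, LayoutLMConfig, LayoutLMOnnxConfig
>>> from transformers.utils import TensorType

>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
>>> onnx_config = LayoutLMOnnxConfig(LayoutLMConfig())
>>> dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
>>> sorted(dummy_inputs.keys())  # doctest: +SKIP
['attention_mask', 'bbox', 'input_ids', 'token_type_ids']
```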
"""
input_dict = super().generate_dummy_inputs(
tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
)
# Generate a dummy bbox
box = [48, 84, 73, 128]
if framework != TensorType.PYTORCH:
raise NotImplementedError("Exporting LayoutLM to ONNX is currently only supported for PyTorch.")
if not is_torch_available():
raise ValueError("Cannot generate dummy inputs without PyTorch installed.")
import torch
batch_size, seq_length = input_dict["input_ids"].shape
input_dict["bbox"] = torch.tensor([*[box] * seq_length]).tile(batch_size, 1, 1)
return input_dict
__all__ = ["LayoutLMConfig", "LayoutLMOnnxConfig"]
| transformers/src/transformers/models/layoutlm/configuration_layoutlm.py/0 | {
"file_path": "transformers/src/transformers/models/layoutlm/configuration_layoutlm.py",
"repo_id": "transformers",
"token_count": 3651
} | 504 |
# coding=utf-8
# Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Got-OCR-2."""
import math
from collections import defaultdict
from functools import lru_cache
from typing import Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
group_images_by_shape,
reorder_images,
)
from ...image_utils import ImageInput, PILImageResampling, SizeDict
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
)
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
def get_factors(dividend: int) -> set[int]:
"""
Calculate all factors of a given number, i.e. all divisors that leave
no remainder. For example, if dividend=12, it will return {1, 2, 3, 4, 6, 12}.
Args:
dividend (int): The number to find factors for.
Returns:
set: A set containing all factors of the number.
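Example (a quick illustrative check):
>>> sorted(get_factors(12))
[1, 2, 3, 4, 6, 12]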
"""
factors_set = set()
for i in range(1, int(dividend**0.5) + 1):
if dividend % i == 0:
factors_set.add(i)
factors_set.add(dividend // i)
return factors_set
def get_max_res_without_distortion(
image_size: tuple[int, int],
target_size: tuple[int, int],
) -> tuple[int, int]:
"""
Determines the maximum resolution to which an image can be resized without distorting its
aspect ratio, based on the target resolution.
Args:
image_size (tuple[int, int]): The original resolution of the image (height, width).
target_size (tuple[int, int]): The desired resolution to fit the image into (height, width).
Returns:
tuple[int, int]: The optimal dimensions (height, width) to which the image should be resized.
Example:
>>> get_max_res_without_distortion([200, 300], target_size=[450, 200])
(133, 200)
>>> get_max_res_without_distortion([800, 600], target_size=[450, 1300])
(450, 337)
"""
original_height, original_width = image_size
target_height, target_width = target_size
scale_w = target_width / original_width
scale_h = target_height / original_height
if scale_w < scale_h:
new_width = target_width
new_height = min(math.floor(original_height * scale_w), target_height)
else:
new_height = target_height
new_width = min(math.floor(original_width * scale_h), target_width)
return new_height, new_width
def split_to_tiles(images: torch.Tensor, num_tiles_height: int, num_tiles_width: int) -> torch.Tensor:
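"""
Split each image in a batch into a grid of `num_tiles_height` x `num_tiles_width` equally sized tiles.
Example (shape-only sketch; assumes `torch` is available):
>>> import torch
>>> images = torch.zeros(2, 3, 448, 448)
>>> split_to_tiles(images, num_tiles_height=2, num_tiles_width=2).shape
torch.Size([2, 4, 3, 224, 224])
"""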
# Split image into number of required tiles (width x height)
batch_size, num_channels, height, width = images.size()
images = images.view(
batch_size,
num_channels,
num_tiles_height,
height // num_tiles_height,
num_tiles_width,
width // num_tiles_width,
)
# Permute dimensions to reorder the axes
image = images.permute(0, 2, 4, 1, 3, 5).contiguous()
# Reshape into the desired output shape (batch_size, num_tiles, num_channels, height // num_tiles_height, width // num_tiles_width)
image = image.view(
batch_size,
num_tiles_width * num_tiles_height,
num_channels,
height // num_tiles_height,
width // num_tiles_width,
)
return image
@lru_cache(maxsize=1)
def find_supported_resolutions(max_num_chunks: int, patch_size: SizeDict) -> torch.Tensor:
"""
Computes all of the allowed resolutions for a fixed number of chunks
and patch_size. Useful for when dividing an image into chunks.
Args:
max_num_chunks (int): Maximum number of chunks for processing.
patch_size (`SizeDict`): The (square) patch size; its height and width must be equal.
Returns:
torch.Tensor: List of possible resolutions as tuples (height, width).
Example:
>>> max_num_chunks = 5
>>> patch_size = 224
>>> find_supported_resolutions(max_num_chunks, patch_size)
tensor([(224, 896), (448, 448), (224, 224), (896, 224), (224, 672),
(672, 224), (224, 448), (448, 224)])
Given max_num_chunks=4, patch_size=224, it will create a dictionary:
{
0.25: [(1, 4)],
1.0: [(2, 2), (1, 1)],
4.0: [(4, 1)],
0.33: [(1, 3)],
3.0: [(3, 1)],
0.5: [(1, 2)],
2.0: [(2, 1)]
}
and return the resolutions multiplied by the patch_size:
[(1*224, 4*224), (2*224, 2*224), ..., (2*224, 1*224)]
"""
height, width = patch_size.height, patch_size.width
if height != width:
raise ValueError("`size` must be square.")
patch_size = height
asp_dict = defaultdict(list)
for chunk_size in range(max_num_chunks, 0, -1):
_factors = sorted(get_factors(chunk_size))
_asp_ratios = [(factor, chunk_size // factor) for factor in _factors]
for height, width in _asp_ratios:
ratio_float = height / width
asp_dict[ratio_float].append((height, width))
# get the resolutions multiplied by the patch_size
possible_resolutions = []
for value in asp_dict.values():
for height, width in value:
possible_resolutions.append((height * patch_size, width * patch_size))
return possible_resolutions
def pad_to_best_fit(
images: "torch.Tensor",
target_size: tuple[int, int],
background_color: Union[int, tuple[int, int, int]] = 0,
) -> "torch.Tensor":
"""
Pads an image to fit the target size.
Args:
images (`torch.Tensor`):
The images to pad.
target_size (`tuple[int, int]`):
The target (height, width) to pad the images to.
background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
The color to use for the padding. Can be an integer for single-channel images or a
tuple of integers for multi-channel images. If passed as an integer
in multi-channel mode, it will default to `0` in subsequent channels.
Returns:
`torch.Tensor`: The padded images.
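Example (shape-only sketch; assumes `torch` and `torchvision` are available):
>>> import torch
>>> images = torch.zeros(1, 3, 200, 300)
>>> pad_to_best_fit(images, target_size=(224, 448)).shape
torch.Size([1, 3, 224, 448])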
"""
num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0]
if isinstance(background_color, int):
background_color = [background_color] + [0] * (num_channels - 1)
elif len(background_color) != num_channels:
raise ValueError(
f"background_color must have no more than {num_channels} elements to match the number of channels"
)
height, width = images.shape[-2:]
target_height, target_width = target_size
paste_x_right = target_width - width
paste_y_right = target_height - height
padded_images = F.pad(images, padding=[0, 0, paste_x_right, paste_y_right], fill=background_color)
return padded_images
def get_best_fit(
image_size: tuple[int, int],
possible_resolutions: torch.Tensor,
resize_to_max_canvas: bool = False,
) -> tuple[int, int]:
"""
Determines the best canvas from a list of possible resolutions to resize an image to
without distortion.
For each possible resolution, calculates the scaling factors for
width and height, and selects the smallest one, which is the limiting side.
E.g. to match the canvas you can upscale height by 2x, and width by 1.5x,
therefore, the maximum upscaling you can do is min(2, 1.5) = 1.5.
If upscaling is possible (any of the scaling factors is greater than 1),
then picks the smallest upscaling factor > 1, unless resize_to_max_canvas is True.
If upscaling is not possible, then picks the largest scaling factor <= 1, i.e.
reduce downscaling as much as possible.
If there are multiple resolutions with the same max scale, we pick the one with the lowest area,
to minimize padding. E.g., the same image can be upscaled to 224x224 and 224x448, but the latter
has more padding.
Args:
image_size (tuple[int, int]): A tuple containing the height and width of the image.
possible_resolutions (torch.Tensor): A tensor of shape (N, 2) where each
row represents a possible resolution (height, width).
resize_to_max_canvas (bool): If True, will return the largest upscaling resolution.
Returns:
list[int]: The best resolution [height, width] for the given image.
Example:
>>> image_size = (200, 300)
>>> possible_resolutions = torch.tensor([[224, 672],
... [672, 224],
... [224, 448],
... [448, 224],
... [224, 224]])
>>> get_best_fit(image_size, possible_resolutions)
[224, 448]
We have:
scale_w = tensor([2.2400, 0.7467, 1.4933, 0.7467, 0.7467])
scale_h = tensor([1.1200, 3.3600, 1.1200, 2.2400, 1.1200])
scales = tensor([1.1200, 0.7467, 1.1200, 0.7467, 0.7467])
Two of the scales allow upscaling (>= 1):
upscaling_possible = tensor([1.1200, 1.1200])
smallest_rescale = tensor(1.1200)
So we pick the resolution with the smallest area:
areas = tensor([150528, 100352]) # [224, 672], [224, 448]
optimal_canvas = tensor([224, 448])
"""
original_height, original_width = image_size
# get all possible resolutions heights/widths
target_heights, target_widths = (
possible_resolutions[:, 0],
possible_resolutions[:, 1],
)
# get scaling factors to resize the image without distortion
scale_w = target_widths / original_width
scale_h = target_heights / original_height
# get the min scale between width and height (limiting side -> no distortion)
scales = torch.where(scale_h > scale_w, scale_w, scale_h)
# filter only scales that allow upscaling
upscaling_options = scales[scales >= 1]
if len(upscaling_options) > 0:
if resize_to_max_canvas:
selected_scale = torch.max(upscaling_options)
else:
selected_scale = torch.min(upscaling_options)
else:
# no upscaling possible,
# get the minimum downscaling (max scale for scales<1)
downscaling_options = scales[scales < 1]
selected_scale = torch.max(downscaling_options)
# get all resolutions that support this scaling factor,
# e.g. you can upscale to 224x224, 224x448, 224x672 without distortion
chosen_canvas = possible_resolutions[scales == selected_scale]
# if there are multiple resolutions,
# get the one with minimum area to reduce padding
if len(chosen_canvas) > 1:
areas = chosen_canvas[:, 0] * chosen_canvas[:, 1]
optimal_idx = torch.argmin(areas)
optimal_canvas = chosen_canvas[optimal_idx]
else:
optimal_canvas = chosen_canvas[0]
return optimal_canvas
class Llama4ImageProcessorKwargs(DefaultFastImageProcessorKwargs):
"""
max_patches (`int`, *optional*, defaults to 16):
The maximum number of patches to be extracted from the image.
Can be overridden by the `max_patches` parameter in the `preprocess` method.
resize_to_max_canvas (`bool`, *optional*, defaults to False):
Whether to resize the image to the maximum canvas size.
If True, picks the canvas that allows the largest resizing without distortion.
If False, downsample as little as possible, including no resizing at all,
but never upsample, unless the image is smaller than the patch size.
"""
max_patches: Optional[int]
resize_to_max_canvas: Optional[bool]
@auto_docstring
class Llama4ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = [0.5, 0.5, 0.5]
image_std = [0.5, 0.5, 0.5]
size = {"height": 336, "width": 336}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
max_patches = 16
resize_to_max_canvas = False
valid_kwargs = Llama4ImageProcessorKwargs
def __init__(self, **kwargs: Unpack[Llama4ImageProcessorKwargs]):
super().__init__(**kwargs)
# Disable compilation here as conversion to bfloat16 causes differences in the output of the compiled and non-compiled versions
@torch.compiler.disable
def rescale_and_normalize(
self,
images: "torch.Tensor",
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Union[float, list[float]],
image_std: Union[float, list[float]],
) -> "torch.Tensor":
"""
Rescale and normalize images.
Override to rescale and normalize the images in torch.bfloat16 as in the original implementation
"""
if do_rescale and do_normalize:
images = images.to(dtype=torch.bfloat16) * rescale_factor
images = self.normalize(images, image_mean, image_std)
elif do_rescale:
images = images * rescale_factor
elif do_normalize:
images = self.normalize(images, image_mean, image_std)
return images
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[Llama4ImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
size: SizeDict,
max_patches: int,
resize_to_max_canvas: bool,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
possible_resolutions = find_supported_resolutions(max_num_chunks=max_patches, patch_size=size)
possible_resolutions = torch.tensor(possible_resolutions, device=images[0].device)
# process images by batch, grouped by shape
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
grouped_processed_images = {}
grouped_aspect_ratios = {}
for shape, stacked_images in grouped_images.items():
image_size = stacked_images.shape[-2:]
target_size = get_best_fit(image_size, possible_resolutions, resize_to_max_canvas=resize_to_max_canvas)
# If target_size requires upscaling, we might want to limit the upscaling to max_upscaling_size
max_upscaling_size = None if resize_to_max_canvas else size.height
if max_upscaling_size is not None:
new_target_height = min(max(image_size[0], max_upscaling_size), target_size[0])
new_target_width = min(max(image_size[1], max_upscaling_size), target_size[1])
target_size_without_distortion = (new_target_height, new_target_width)
# resize to target_size while preserving aspect ratio
new_size_without_distortion = get_max_res_without_distortion(image_size, target_size_without_distortion)
new_size_without_distortion = SizeDict(
height=max(new_size_without_distortion[0], 1), width=max(new_size_without_distortion[1], 1)
)
processed_images = self.resize(
stacked_images,
new_size_without_distortion,
interpolation=interpolation,
)
# pad to target_size to be able to split into tiles
processed_images = pad_to_best_fit(processed_images, target_size)
processed_images = self.rescale_and_normalize(
processed_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
ratio_h, ratio_w = (
target_size[0] // size.height,
target_size[1] // size.width,
)
# split into tiles
processed_images = split_to_tiles(processed_images, ratio_h, ratio_w)
grouped_processed_images[shape] = processed_images
grouped_aspect_ratios[shape] = torch.tensor(
[[ratio_h, ratio_w]] * stacked_images.shape[0], device=images[0].device
)
# add a global tile to the processed tile if there are more than one tile
if ratio_h * ratio_w > 1:
global_tiles = self.resize(
stacked_images,
size,
interpolation=interpolation,
)
global_tiles = self.rescale_and_normalize(
global_tiles, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
grouped_processed_images[shape] = torch.cat([processed_images, global_tiles.unsqueeze(1)], dim=1)
processed_images = reorder_images(grouped_processed_images, grouped_images_index)
aspect_ratios_list = reorder_images(grouped_aspect_ratios, grouped_images_index)
processed_images = torch.cat(processed_images, dim=0) if return_tensors else processed_images
aspect_ratios = torch.stack(aspect_ratios_list, dim=0) if return_tensors else aspect_ratios_list
return BatchFeature(
data={"pixel_values": processed_images, "aspect_ratios": aspect_ratios}, tensor_type=return_tensors
)
__all__ = ["Llama4ImageProcessorFast"]
| transformers/src/transformers/models/llama4/image_processing_llama4_fast.py/0 | {
"file_path": "transformers/src/transformers/models/llama4/image_processing_llama4_fast.py",
"repo_id": "transformers",
"token_count": 7453
} | 505 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for LLaVa-NeXT.
"""
from typing import Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import select_best_resolution
from ...image_utils import ImageInput, get_image_size, to_numpy_array
from ...processing_utils import (
MultiModalData,
ProcessingKwargs,
ProcessorMixin,
Unpack,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
class LlavaNextProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
"images_kwargs": {
"do_pad": True,
},
}
class LlavaNextProcessor(ProcessorMixin):
r"""
Constructs a LLaVa-NeXT processor which wraps a LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor.
[`LlavaNextProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~LlavaNextProcessor.__call__`] and [`~LlavaNextProcessor.decode`] for more information.
Args:
image_processor ([`LlavaNextImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*):
Patch size from the vision tower.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Should be same as in model's config
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"<image>"`):
Special token used to denote image location.
num_additional_image_tokens (`int`, *optional*, defaults to 0):
Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other
extra tokens appended, no need to set this arg.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
image_processor=None,
tokenizer=None,
patch_size=None,
vision_feature_select_strategy=None,
chat_template=None,
image_token="<image>", # set the default and let users change if they have peculiar special tokens in rare cases
num_additional_image_tokens=0,
**kwargs,
):
self.patch_size = patch_size
self.num_additional_image_tokens = num_additional_image_tokens
self.vision_feature_select_strategy = vision_feature_select_strategy
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
audio=None,
videos=None,
**kwargs: Unpack[LlavaNextProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to
LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
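Example (a minimal usage sketch; the checkpoint name is illustrative and loading it requires downloading the files):

```python
>>> from PIL import Image
>>> from transformers import LlavaNextProcessor

>>> processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
>>> image = Image.new("RGB", (500, 500))
>>> prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> sorted(inputs.keys())  # doctest: +SKIP
['attention_mask', 'image_sizes', 'input_ids', 'pixel_values']
```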
"""
if images is None and text is None:
raise ValueError("You have to specify at least images or text.")
output_kwargs = self._merge_kwargs(
LlavaNextProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
else:
image_inputs = {}
if isinstance(text, str):
text = [text]
elif not (isinstance(text, list) and isinstance(text[0], str)):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
prompt_strings = text
if image_inputs:
image_sizes = iter(image_inputs["image_sizes"])
height, width = get_image_size(to_numpy_array(image_inputs["pixel_values"][0][0]))
prompt_strings = []
for sample in text:
while self.image_token in sample:
image_size = next(image_sizes)
if not isinstance(image_size, (list, tuple)):
# cast to list to avoid numerical precision errors when calculating unpadding
image_size = image_size.tolist()
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
sample = sample.replace(self.image_token, "<placeholder>" * num_image_tokens, 1)
prompt_strings.append(sample)
prompt_strings = [sample.replace("<placeholder>", self.image_token) for sample in prompt_strings]
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
image_grid_pinpoints = self.image_processor.image_grid_pinpoints
height_best_resolution, width_best_resolution = select_best_resolution(
[orig_height, orig_width], image_grid_pinpoints
)
scale_height, scale_width = height_best_resolution // height, width_best_resolution // width
patches_height = height // self.patch_size
patches_width = width // self.patch_size
unpadded_features, newline_features = self._get_unpadded_features(
orig_height, orig_width, patches_height, patches_width, scale_height, scale_width
)
# The base patch covers the entire image (+1 for the CLS)
base_features = patches_height * patches_width + self.num_additional_image_tokens
num_image_tokens = unpadded_features + newline_features + base_features
return num_image_tokens
def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
"""
Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA
        because it divides each image into patches depending on its resolution. Therefore we need to calculate how many
patches an image is divided into and get the number of features from that.
"""
current_height = patches_height * scale_height
current_width = patches_width * scale_width
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
new_height = int(round(height * (current_width / width), 7))
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
new_width = int(round(width * (current_height / height), 7))
padding = (current_width - new_width) // 2
current_width -= padding * 2
unpadded_features = current_height * current_width
newline_features = current_height
return (unpadded_features, newline_features)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
            image_sizes (list[list[int]], *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = LlavaNextProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
size = images_kwargs.get("size", None) or self.image_processor.size
size = (
(size["shortest_edge"], size["shortest_edge"])
if "shortest_edge" in size
else (min(size["height"], size["width"]), min(size["height"], size["width"]))
)
processed_height, processed_width = size
batch_num_image_tokens = []
            num_image_patches = [1] * len(image_sizes)  # llava-next doesn't batch pixels as Idefics does, thus always `1` patch
for image_size in image_sizes:
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(
orig_height, orig_width, processed_height, processed_width
)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
batch_num_image_tokens.append(num_image_tokens)
vision_data.update({"num_image_tokens": batch_num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
__all__ = ["LlavaNextProcessor"]
| transformers/src/transformers/models/llava_next/processing_llava_next.py/0 | {
"file_path": "transformers/src/transformers/models/llava_next/processing_llava_next.py",
"repo_id": "transformers",
"token_count": 5227
} | 506 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for LLaVa-Onevision.
"""
import math
from collections.abc import Iterable
from typing import Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import select_best_resolution
from ...image_utils import ImageInput, get_image_size, to_numpy_array
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import logging
from ...video_utils import VideoInput
logger = logging.get_logger(__name__)
class LlavaOnevisionProcessorKwargs(ProcessingKwargs, total=False):
# see processing_utils.ProcessingKwargs documentation for usage.
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
"image_kwargs": {},
"videos_kwargs": {},
}
class LlavaOnevisionProcessor(ProcessorMixin):
r"""
Constructs a LLaVa-Onevision processor which wraps a LLaVa-Onevision video processor, LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor.
    [`LlavaOnevisionProcessor`] offers all the functionalities of [`LlavaOnevisionVideoProcessor`], [`LlavaOnevisionImageProcessor`] and [`LlamaTokenizerFast`]. See the
    [`~LlavaOnevisionVideoProcessor.__call__`], [`~LlavaOnevisionProcessor.__call__`] and [`~LlavaOnevisionProcessor.decode`] for more information.
Args:
image_processor ([`LlavaOnevisionImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`LlavaOnevisionVideoProcessor`], *optional*):
The video processor is a required input.
num_image_tokens (`int`, *optional*):
            Number of image tokens for one image that will be returned by the vision tower.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Should be same as in model's config
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"<image>"`):
Special token used to denote image location.
video_token (`str`, *optional*, defaults to `"<video>"`):
Special token used to denote video location.
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`):
            Aspect ratio used when processing image features. The default value is "anyres_max_9".
"""
attributes = ["image_processor", "tokenizer", "video_processor"]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"
video_processor_class = "AutoVideoProcessor"
def __init__(
self,
image_processor=None,
tokenizer=None,
video_processor=None,
num_image_tokens=None,
vision_feature_select_strategy=None,
chat_template=None,
image_token="<image>",
video_token="<video>",
vision_aspect_ratio="anyres_max_9",
**kwargs,
):
self.num_image_tokens = num_image_tokens
self.vision_feature_select_strategy = vision_feature_select_strategy
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
self.video_token = tokenizer.video_token if hasattr(tokenizer, "video_token") else video_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
self.video_token_id = (
tokenizer.video_token_id
if getattr(tokenizer, "video_token_id", None)
else tokenizer.convert_tokens_to_ids(self.video_token)
)
self.vision_aspect_ratio = vision_aspect_ratio
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
audio=None,
videos: VideoInput = None,
**kwargs: Unpack[LlavaOnevisionProcessorKwargs],
) -> BatchFeature:
"""
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        LlavaOnevisionImageProcessor's [`~LlavaOnevisionImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch tensor.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of a video input to be fed to a model. Returned when `videos` is not `None`.
- **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
LlavaOnevisionProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = [text]
        elif not isinstance(text, list) or not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
image_inputs = video_inputs = {}
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
batch_num_images = iter(image_inputs["batch_num_images"])
image_sizes = iter(image_inputs["image_sizes"])
height, width = get_image_size(
to_numpy_array(image_inputs["pixel_values"][0][0]),
channel_dim=output_kwargs["images_kwargs"].get("data_format"),
)
text, num_image_tokens = self._expand_image_tokens(
text, image_sizes, height, width, self.image_token, batch_num_images
)
if videos is not None:
video_inputs = self.video_processor(videos, **output_kwargs["videos_kwargs"])
one_video = video_inputs.get("pixel_values_videos")[0]
if isinstance(video_inputs.get("pixel_values_videos")[0], (list, tuple)):
one_video = np.array(one_video)
else:
one_video = to_numpy_array(one_video)
height, width = get_image_size(one_video[0], channel_dim=output_kwargs["images_kwargs"].get("data_format"))
num_frames = one_video.shape[0] # frame dim is always after batch dim
patches_height_width = int(math.sqrt(self.num_image_tokens))
pooled_height_width = math.ceil(patches_height_width / 2)
num_video_tokens = (num_frames * pooled_height_width * pooled_height_width) + 1 # +1 for newline token
text = [sample.replace(self.video_token, self.video_token * num_video_tokens) for sample in text]
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(text, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **video_inputs}, tensor_type=return_tensors)
def _expand_image_tokens(
self,
text: list[TextInput],
image_sizes: Iterable[Union[list[int], int]],
height: int,
width: int,
special_token: str,
batch_num_images: Iterable[int],
):
prompt_strings = []
max_num_vision_tokens = 0
for sample in text:
if special_token in sample:
num_images = next(batch_num_images) # should consume iterable
is_multi_image = num_images != 1
else:
is_multi_image = False
while special_token in sample:
original_size = next(image_sizes) # should consume iterable
if is_multi_image:
num_image_tokens = self.num_image_tokens + 1 # one for image_newline
else:
if not isinstance(original_size, (list, tuple)):
# cast to list to avoid numerical precision errors when calculating unpadding
original_size = original_size.tolist()
orig_height, orig_width = original_size
num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
max_num_vision_tokens = max(max_num_vision_tokens, num_image_tokens)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
sample = sample.replace(special_token, "<placeholder>" * num_image_tokens, 1)
prompt_strings.append(sample)
text = [sample.replace("<placeholder>", special_token) for sample in prompt_strings]
return text, max_num_vision_tokens
def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
image_grid_pinpoints = self.image_processor.image_grid_pinpoints
height_best_resolution, width_best_resolution = select_best_resolution(
[orig_height, orig_width], image_grid_pinpoints
)
scale_height, scale_width = height_best_resolution // height, width_best_resolution // width
patches_height = patches_width = int(math.sqrt(self.num_image_tokens))
unpadded_features, newline_features = self._get_unpadded_features(
orig_height, orig_width, patches_height, patches_width, scale_height, scale_width
)
# The base patch covers the entire image (no CLS for SigLIP)
base_features = self.num_image_tokens
num_image_tokens = unpadded_features + newline_features + base_features
return num_image_tokens
# Adapted from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_unpadded_features
def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
"""
Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA
        because it divides each image into patches depending on its resolution. Therefore we need to calculate how many
patches an image is divided into and get the number of features from that.
"""
current_height = patches_height * scale_height
current_width = patches_width * scale_width
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
new_height = int(round(height * (current_width / width), 7))
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
new_width = int(round(width * (current_height / height), 7))
padding = (current_width - new_width) // 2
current_width -= padding * 2
unpadded_features = current_height * current_width
newline_features = current_height
max_num_patches = int(self.vision_aspect_ratio.strip("anyres_max_"))
ratio = math.sqrt(current_height * current_width / (max_num_patches * patches_height**2))
if ratio > 1.1:
unpadded_features = int(current_height // ratio) * int(current_width // ratio)
newline_features = int(current_height // ratio)
return (unpadded_features, newline_features)
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
            image_sizes (list[list[int]], *optional*):
The input sizes formatted as (height, width) per each image.
            video_sizes (list[list[int]], *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
audio_lengths (list[int], *optional*):
The input length formatted as per each audio.
Returns:
            `MultiModalData`: A `MultiModalData` object holding the number of placeholder tokens required per each of
            the provided input modalities ("image", "video", "audio"), along with other useful data. If the model
            doesn't accept a certain modality or no input sizes are provided, the corresponding entry is left empty.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = LlavaOnevisionProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
size = images_kwargs.get("size", None) or self.image_processor.size
size = (
(size["shortest_edge"], size["shortest_edge"])
if "shortest_edge" in size
else (min(size["height"], size["width"]), min(size["height"], size["width"]))
)
processed_height, processed_width = size
batch_num_image_tokens = []
            num_image_patches = [1] * len(image_sizes)  # llava-ov doesn't batch pixels as Idefics does, thus always `1` patch
for image_size in image_sizes:
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(
orig_height, orig_width, processed_height, processed_width
)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
batch_num_image_tokens.append(num_image_tokens)
vision_data.update({"num_image_tokens": batch_num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
__all__ = ["LlavaOnevisionProcessor"]
| transformers/src/transformers/models/llava_onevision/processing_llava_onevision.py/0 | {
"file_path": "transformers/src/transformers/models/llava_onevision/processing_llava_onevision.py",
"repo_id": "transformers",
"token_count": 6979
} | 507 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert LUKE checkpoint."""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(metadata_path) as metadata_file:
metadata = json.load(metadata_file)
config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
# Load in the weights from the checkpoint_path
state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)
# Load the entity vocab file
entity_vocab = load_entity_vocab(entity_vocab_path)
tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
# Add special tokens to the token vocabulary for downstream tasks
entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}")
tokenizer.save_pretrained(pytorch_dump_folder_path)
with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
json.dump(entity_vocab, f)
tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
# Initialize the embeddings of the special tokens
word_emb = state_dict["embeddings.word_embeddings.weight"]
ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers):
for matrix_name in ["query.weight", "query.bias"]:
prefix = f"encoder.layer.{layer_index}.attention.self."
state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
model = LukeModel(config=config).eval()
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
raise ValueError(
"Unexpected keys"
f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
)
# Check outputs
tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
text = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
span = (39, 42)
encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
outputs = model(**encoding)
# Verify word hidden states
if model_size == "large":
expected_shape = torch.Size((1, 42, 1024))
expected_slice = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
)
else: # base
expected_shape = torch.Size((1, 42, 768))
expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
)
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
raise ValueError
# Verify entity hidden states
if model_size == "large":
expected_shape = torch.Size((1, 1, 1024))
expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
else: # base
expected_shape = torch.Size((1, 1, 768))
expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}"
)
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print(f"Saving PyTorch model to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
entity_vocab = {}
with open(entity_vocab_path, "r", encoding="utf-8") as f:
for index, line in enumerate(f):
title, _ = line.rstrip().split("\t")
entity_vocab[title] = index
return entity_vocab
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
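    # Example invocation (all paths below are illustrative placeholders):
    #   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path ./luke_base/pytorch_model.bin \
    #       --metadata_path ./luke_base/metadata.json \
    #       --entity_vocab_path ./luke_base/entity_vocab.tsv \
    #       --pytorch_dump_folder_path ./converted_luke_base \
    #       --model_size base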
| transformers/src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 2883
} | 508 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAMBA configuration"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class MambaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MambaModel`]. It is used to instantiate a MAMBA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MAMBA
[state-spaces/mamba-2.8b](https://huggingface.co/state-spaces/mamba-2.8b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50280):
Vocabulary size of the MAMBA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MambaModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
state_size (`int`, *optional*, defaults to 16): shape of the state space latents.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the model.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 0):
The id of the beginning of sentence token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 0):
The id of the end of sentence token in the vocabulary.
expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.
use_bias (`bool`, *optional*, defaults to `False`):
Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block
use_conv_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use bias in the convolution layer of the mixer block.
hidden_act (`str`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.1):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
residual_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model
time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
time_step_scale (`float`, *optional*, defaults to 1.0):
            Scale used to scale `dt_proj.bias`.
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj.bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj.bias`.
time_step_init_scheme (`float`, *optional*, defaults to `"random"`):
Init scheme used for `dt_proj.weight`. Should be one of `["random","uniform"]`
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
Whether or not to rescale `out_proj` weights when initializing.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the cache should be used.
use_mambapy (`bool`, *optional*, defaults to `False`):
Determines the fallback strategy during training if the CUDA-based official implementation of Mamba is not available. If `True`, the mamba.py implementation is used. If `False`, the naive and slower implementation is used. Consider switching to the naive version if memory is limited.
Example:
```python
>>> from transformers import MambaConfig, MambaModel
>>> # Initializing a Mamba configuration
>>> configuration = MambaConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = MambaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mamba"
def __init__(
self,
vocab_size=50280,
hidden_size=768,
state_size=16,
num_hidden_layers=32,
layer_norm_epsilon=1e-5,
pad_token_id=0,
bos_token_id=0,
eos_token_id=0,
expand=2,
conv_kernel=4,
use_bias=False,
use_conv_bias=True,
hidden_act="silu",
initializer_range=0.1,
residual_in_fp32=True,
time_step_rank="auto",
time_step_scale=1.0,
time_step_min=0.001,
time_step_max=0.1,
time_step_init_scheme="random",
time_step_floor=1e-4,
rescale_prenorm_residual=False,
use_cache=True,
use_mambapy=False,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.state_size = state_size
self.num_hidden_layers = num_hidden_layers
self.layer_norm_epsilon = layer_norm_epsilon
self.conv_kernel = conv_kernel
self.expand = expand
self.intermediate_size = int(expand * self.hidden_size)
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.use_bias = use_bias
self.use_conv_bias = use_conv_bias
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank
self.time_step_scale = time_step_scale
self.time_step_min = time_step_min
self.time_step_max = time_step_max
self.time_step_init_scheme = time_step_init_scheme
self.time_step_floor = time_step_floor
self.rescale_prenorm_residual = rescale_prenorm_residual
self.residual_in_fp32 = residual_in_fp32
self.use_cache = use_cache
self.use_mambapy = use_mambapy
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
__all__ = ["MambaConfig"]
| transformers/src/transformers/models/mamba/configuration_mamba.py/0 | {
"file_path": "transformers/src/transformers/models/mamba/configuration_mamba.py",
"repo_id": "transformers",
"token_count": 2895
} | 509 |
# coding=utf-8
# Copyright 2021, The Microsoft Research Asia MarkupLM Team authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MarkupLM model configuration"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class MarkupLMConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MarkupLMModel`]. It is used to instantiate a
MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the MarkupLM
[microsoft/markuplm-base](https://huggingface.co/microsoft/markuplm-base) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the MarkupLM model. Defines the number of different tokens that can be represented by the
*inputs_ids* passed to the forward method of [`MarkupLMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed into [`MarkupLMModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
max_tree_id_unit_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the tree id unit embedding might ever use. Typically set this to something large
just in case (e.g., 1024).
max_xpath_tag_unit_embeddings (`int`, *optional*, defaults to 256):
The maximum value that the xpath tag unit embedding might ever use. Typically set this to something large
just in case (e.g., 256).
max_xpath_subs_unit_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the xpath subscript unit embedding might ever use. Typically set this to something
large just in case (e.g., 1024).
tag_pad_id (`int`, *optional*, defaults to 216):
The id of the padding token in the xpath tags.
subs_pad_id (`int`, *optional*, defaults to 1001):
The id of the padding token in the xpath subscripts.
        xpath_unit_hidden_size (`int`, *optional*, defaults to 32):
            The hidden size of each tree id unit. One complete tree index will have
            (50*xpath_unit_hidden_size) dimensions.
max_depth (`int`, *optional*, defaults to 50):
The maximum depth in xpath.
Examples:
```python
>>> from transformers import MarkupLMModel, MarkupLMConfig
>>> # Initializing a MarkupLM microsoft/markuplm-base style configuration
>>> configuration = MarkupLMConfig()
>>> # Initializing a model from the microsoft/markuplm-base style configuration
>>> model = MarkupLMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "markuplm"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
bos_token_id=0,
eos_token_id=2,
max_xpath_tag_unit_embeddings=256,
max_xpath_subs_unit_embeddings=1024,
tag_pad_id=216,
subs_pad_id=1001,
xpath_unit_hidden_size=32,
max_depth=50,
position_embedding_type="absolute",
use_cache=True,
classifier_dropout=None,
**kwargs,
):
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self._position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
# additional properties
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.xpath_unit_hidden_size = xpath_unit_hidden_size
@property
def position_embedding_type(self):
warnings.warn(
"The `position_embedding_type` attribute is deprecated and will be removed in v4.55.",
FutureWarning,
)
return self._position_embedding_type
@position_embedding_type.setter
def position_embedding_type(self, value):
self._position_embedding_type = value
__all__ = ["MarkupLMConfig"]
| transformers/src/transformers/models/markuplm/configuration_markuplm.py/0 | {
"file_path": "transformers/src/transformers/models/markuplm/configuration_markuplm.py",
"repo_id": "transformers",
"token_count": 3005
} | 510 |
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from argparse import ArgumentParser
from collections.abc import Iterator
from dataclasses import dataclass
from pathlib import Path
from pprint import pformat
from typing import Any
import requests
import torch
import torchvision.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.projects.deeplab import add_deeplab_config
from PIL import Image
from torch import Tensor, nn
from transformers.models.maskformer.feature_extraction_maskformer import MaskFormerImageProcessor
from transformers.models.maskformer.modeling_maskformer import (
MaskFormerConfig,
MaskFormerForInstanceSegmentation,
MaskFormerForInstanceSegmentationOutput,
MaskFormerModel,
MaskFormerModelOutput,
)
from transformers.utils import logging
StateDict = dict[str, Tensor]
logging.set_verbosity_info()
logger = logging.get_logger()
torch.manual_seed(0)
class TrackedStateDict:
def __init__(self, to_track: dict):
"""This class "tracks" a python dictionary by keeping track of which item is accessed.
Args:
to_track (Dict): The dictionary we wish to track
"""
self.to_track = to_track
self._seen: set[str] = set()
def __getitem__(self, key: str) -> Any:
return self.to_track[key]
def __setitem__(self, key: str, item: Any):
self._seen.add(key)
self.to_track[key] = item
    def diff(self) -> set[str]:
        """This method returns the set difference between the keys in the tracked state dict and the ones we have set so far.
        This is an effective way to check whether we have updated all the keys.
        Returns:
            set[str]: Set of keys not yet updated
"""
return set(self.to_track.keys()) - self._seen
def copy(self) -> dict:
# proxy the call to the internal dictionary
return self.to_track.copy()
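# Hedged usage sketch (not part of the original script): TrackedStateDict only records keys that
# are written, so diff() reports everything the conversion never touched. The tiny dict below is an
# illustrative stand-in for a real state dict.
def _demo_tracked_state_dict():
    tracked = TrackedStateDict({"encoder.weight": 1, "encoder.bias": 2, "head.weight": 3})
    tracked["encoder.weight"] = 10  # marks the key as updated
    tracked["encoder.bias"] = 20
    return tracked.diff()  # {"head.weight"}: the key the conversion missed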
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
img_data = requests.get(url, stream=True).raw
im = Image.open(img_data)
return im
@dataclass
class Args:
"""Fake command line arguments needed by maskformer/detectron implementation"""
config_file: str
def setup_cfg(args: Args):
# load config from file and command-line arguments
cfg = get_cfg()
add_deeplab_config(cfg)
    add_mask_former_config(cfg)  # defined in the original MaskFormer repo (not shipped with transformers)
cfg.merge_from_file(args.config_file)
cfg.freeze()
return cfg
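# Hedged usage note (not part of the original script): `setup_cfg` expects the Args dataclass
# defined above; the config path below is purely illustrative.
#   cfg = setup_cfg(Args(config_file="configs/ade20k-150/swin/maskformer_swin_base_IN21k_384.yaml"))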
class OriginalMaskFormerConfigToOursConverter:
def __call__(self, original_config: object) -> MaskFormerConfig:
model = original_config.MODEL
mask_former = model.MASK_FORMER
swin = model.SWIN
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0])
id2label = dict(enumerate(dataset_catalog.stuff_classes))
label2id = {label: idx for idx, label in id2label.items()}
config: MaskFormerConfig = MaskFormerConfig(
fpn_feature_size=model.SEM_SEG_HEAD.CONVS_DIM,
mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM,
num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
no_object_weight=mask_former.NO_OBJECT_WEIGHT,
num_queries=mask_former.NUM_OBJECT_QUERIES,
backbone_config={
"pretrain_img_size": swin.PRETRAIN_IMG_SIZE,
"image_size": swin.PRETRAIN_IMG_SIZE,
"in_channels": 3,
"patch_size": swin.PATCH_SIZE,
"embed_dim": swin.EMBED_DIM,
"depths": swin.DEPTHS,
"num_heads": swin.NUM_HEADS,
"window_size": swin.WINDOW_SIZE,
"drop_path_rate": swin.DROP_PATH_RATE,
"model_type": "swin",
},
dice_weight=mask_former.DICE_WEIGHT,
ce_weight=1.0,
mask_weight=mask_former.MASK_WEIGHT,
decoder_config={
"model_type": "detr",
"max_position_embeddings": 1024,
"encoder_layers": 6,
"encoder_ffn_dim": 2048,
"encoder_attention_heads": 8,
"decoder_layers": mask_former.DEC_LAYERS,
"decoder_ffn_dim": mask_former.DIM_FEEDFORWARD,
"decoder_attention_heads": mask_former.NHEADS,
"encoder_layerdrop": 0.0,
"decoder_layerdrop": 0.0,
"d_model": mask_former.HIDDEN_DIM,
"dropout": mask_former.DROPOUT,
"attention_dropout": 0.0,
"activation_dropout": 0.0,
"init_std": 0.02,
"init_xavier_std": 1.0,
"scale_embedding": False,
"auxiliary_loss": False,
"dilation": False,
# default pretrained config values
},
id2label=id2label,
label2id=label2id,
)
return config
class OriginalMaskFormerConfigToImageProcessorConverter:
def __call__(self, original_config: object) -> MaskFormerImageProcessor:
model = original_config.MODEL
model_input = original_config.INPUT
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0])
return MaskFormerImageProcessor(
image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(),
image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(),
size=model_input.MIN_SIZE_TEST,
max_size=model_input.MAX_SIZE_TEST,
num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
ignore_index=dataset_catalog.ignore_label,
size_divisibility=32, # 32 is required by swin
)
class OriginalMaskFormerCheckpointToOursConverter:
def __init__(self, original_model: nn.Module, config: MaskFormerConfig):
self.original_model = original_model
self.config = config
def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
for src_key, dst_key in renamed_keys:
dst_state_dict[dst_key] = src_state_dict.pop(src_key)
def replace_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: MaskFormerConfig):
dst_prefix: str = "pixel_level_module.encoder"
src_prefix: str = "backbone"
renamed_keys = [
(
f"{src_prefix}.patch_embed.proj.weight",
f"{dst_prefix}.model.embeddings.patch_embeddings.projection.weight",
),
(f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.bias"),
(f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.model.embeddings.norm.weight"),
(f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.model.embeddings.norm.bias"),
]
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend(
[ # src, dst
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
),
]
)
# now we need to handle the attentions
# read in weights + bias of input projection layer of cross-attention
src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
] = src_att_weight[:offset, :]
dst_state_dict[
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
] = src_att_bias[:offset]
dst_state_dict[
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
] = src_att_weight[offset : offset * 2, :]
dst_state_dict[
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
] = src_att_bias[offset : offset * 2]
dst_state_dict[
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
] = src_att_weight[-offset:, :]
dst_state_dict[
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
] = src_att_bias[-offset:]
# let's pop them
src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
# proj
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
),
]
)
# second norm
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
),
]
)
# mlp
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
),
]
)
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
)
]
)
if layer_idx < num_layers - 1:
# patch merging
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias",
),
]
)
# hidden states norms
renamed_keys.extend(
[
(
f"{src_prefix}.norm{layer_idx}.weight",
f"{dst_prefix}.hidden_states_norms.{layer_idx}.weight",
),
(
f"{src_prefix}.norm{layer_idx}.bias",
f"{dst_prefix}.hidden_states_norms.{layer_idx}.bias",
),
]
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "pixel_level_module.decoder"
src_prefix: str = "sem_seg_head.pixel_decoder"
self.replace_backbone(dst_state_dict, src_state_dict, self.config)
def rename_keys_for_conv(detectron_conv: str, mine_conv: str):
return [
(f"{detectron_conv}.weight", f"{mine_conv}.0.weight"),
                # 2 because they have an activation in the middle -> rename it
(f"{detectron_conv}.norm.weight", f"{mine_conv}.1.weight"),
(f"{detectron_conv}.norm.bias", f"{mine_conv}.1.bias"),
]
renamed_keys = [
(f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"),
(f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"),
# the layers in the original one are in reverse order, stem is the last one!
]
renamed_keys.extend(rename_keys_for_conv(f"{src_prefix}.layer_4", f"{dst_prefix}.fpn.stem"))
# add all the fpn layers (here we need some config parameters to know the size in advance)
for src_i, dst_i in zip(range(3, 0, -1), range(0, 3)):
renamed_keys.extend(
rename_keys_for_conv(f"{src_prefix}.adapter_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.proj")
)
renamed_keys.extend(
rename_keys_for_conv(f"{src_prefix}.layer_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.block")
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def rename_keys_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "transformer_module.decoder"
src_prefix: str = "sem_seg_head.predictor.transformer.decoder"
        # not sure why we are not popping directly here!
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(self.config.decoder_config.decoder_layers):
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"{src_prefix}.layers.{i}.self_attn.out_proj.weight",
f"{dst_prefix}.layers.{i}.self_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"{src_prefix}.layers.{i}.self_attn.out_proj.bias",
f"{dst_prefix}.layers.{i}.self_attn.out_proj.bias",
)
)
rename_keys.append(
(
f"{src_prefix}.layers.{i}.multihead_attn.out_proj.weight",
f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"{src_prefix}.layers.{i}.multihead_attn.out_proj.bias",
f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"{src_prefix}.layers.{i}.linear1.weight", f"{dst_prefix}.layers.{i}.fc1.weight"))
rename_keys.append((f"{src_prefix}.layers.{i}.linear1.bias", f"{dst_prefix}.layers.{i}.fc1.bias"))
rename_keys.append((f"{src_prefix}.layers.{i}.linear2.weight", f"{dst_prefix}.layers.{i}.fc2.weight"))
rename_keys.append((f"{src_prefix}.layers.{i}.linear2.bias", f"{dst_prefix}.layers.{i}.fc2.bias"))
rename_keys.append(
(f"{src_prefix}.layers.{i}.norm1.weight", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append(
(f"{src_prefix}.layers.{i}.norm1.bias", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.bias")
)
rename_keys.append(
(f"{src_prefix}.layers.{i}.norm2.weight", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"{src_prefix}.layers.{i}.norm2.bias", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append(
(f"{src_prefix}.layers.{i}.norm3.weight", f"{dst_prefix}.layers.{i}.final_layer_norm.weight")
)
rename_keys.append(
(f"{src_prefix}.layers.{i}.norm3.bias", f"{dst_prefix}.layers.{i}.final_layer_norm.bias")
)
return rename_keys
def replace_q_k_v_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "transformer_module.decoder"
src_prefix: str = "sem_seg_head.predictor.transformer.decoder"
for i in range(self.config.decoder_config.decoder_layers):
# read in weights + bias of input projection layer of self-attention
in_proj_weight = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_weight")
in_proj_bias = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
in_proj_weight_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_weight")
in_proj_bias_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_bias")
# next, add query, keys and values (in that order) of cross-attention to the state dict
dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[
256:512, :
]
dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def replace_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "transformer_module.decoder"
src_prefix: str = "sem_seg_head.predictor.transformer.decoder"
renamed_keys = self.rename_keys_in_detr_decoder(dst_state_dict, src_state_dict)
# add more
renamed_keys.extend(
[
(f"{src_prefix}.norm.weight", f"{dst_prefix}.layernorm.weight"),
(f"{src_prefix}.norm.bias", f"{dst_prefix}.layernorm.bias"),
]
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
self.replace_q_k_v_in_detr_decoder(dst_state_dict, src_state_dict)
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "transformer_module"
src_prefix: str = "sem_seg_head.predictor"
self.replace_detr_decoder(dst_state_dict, src_state_dict)
renamed_keys = [
(f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"),
(f"{src_prefix}.input_proj.weight", f"{dst_prefix}.input_projection.weight"),
(f"{src_prefix}.input_proj.bias", f"{dst_prefix}.input_projection.bias"),
]
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_instance_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
# NOTE in our case we don't have a prefix, thus we removed the "." from the keys later on!
dst_prefix: str = ""
src_prefix: str = "sem_seg_head.predictor"
renamed_keys = [
(f"{src_prefix}.class_embed.weight", f"{dst_prefix}class_predictor.weight"),
(f"{src_prefix}.class_embed.bias", f"{dst_prefix}class_predictor.bias"),
]
mlp_len = 3
for i in range(mlp_len):
renamed_keys.extend(
[
(f"{src_prefix}.mask_embed.layers.{i}.weight", f"{dst_prefix}mask_embedder.{i}.0.weight"),
(f"{src_prefix}.mask_embed.layers.{i}.bias", f"{dst_prefix}mask_embedder.{i}.0.bias"),
]
)
logger.info(f"Replacing keys {pformat(renamed_keys)}")
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def convert(self, mask_former: MaskFormerModel) -> MaskFormerModel:
dst_state_dict = TrackedStateDict(mask_former.state_dict())
src_state_dict = self.original_model.state_dict()
self.replace_pixel_module(dst_state_dict, src_state_dict)
self.replace_transformer_module(dst_state_dict, src_state_dict)
logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}")
logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}")
logger.info("🙌 Done")
mask_former.load_state_dict(dst_state_dict)
return mask_former
def convert_instance_segmentation(
self, mask_former: MaskFormerForInstanceSegmentation
) -> MaskFormerForInstanceSegmentation:
dst_state_dict = TrackedStateDict(mask_former.state_dict())
src_state_dict = self.original_model.state_dict()
self.replace_instance_segmentation_module(dst_state_dict, src_state_dict)
mask_former.load_state_dict(dst_state_dict)
return mask_former
@staticmethod
    def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[Path, Path]]:
checkpoints: list[Path] = checkpoints_dir.glob("**/*.pkl")
for checkpoint in checkpoints:
logger.info(f"💪 Converting {checkpoint.stem}")
# find associated config file
config: Path = config_dir / checkpoint.parents[0].stem / "swin" / f"{checkpoint.stem}.yaml"
yield config, checkpoint
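# A minimal sketch of the directory layout `using_dirs` expects (names below are hypothetical):
#
#   <checkpoints_dir>/maskformer_swin_coco/maskformer_swin_base_IN21k_384_bs64_554k.pkl
#   <config_dir>/maskformer_swin_coco/swin/maskformer_swin_base_IN21k_384_bs64_554k.yaml
#
# i.e. every .pkl found under `checkpoints_dir` is paired with a .yaml of the same stem located
# under `<config_dir>/<checkpoint parent directory name>/swin/`.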
def test(original_model, our_model: MaskFormerForInstanceSegmentation, image_processor: MaskFormerImageProcessor):
with torch.no_grad():
original_model = original_model.eval()
our_model = our_model.eval()
im = prepare_img()
tr = T.Compose(
[
T.Resize((384, 384)),
T.ToTensor(),
T.Normalize(
mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0,
std=torch.tensor([58.395, 57.120, 57.375]) / 255.0,
),
],
)
x = tr(im).unsqueeze(0)
original_model_backbone_features = original_model.backbone(x.clone())
our_model_output: MaskFormerModelOutput = our_model.model(x.clone(), output_hidden_states=True)
for original_model_feature, our_model_feature in zip(
original_model_backbone_features.values(), our_model_output.encoder_hidden_states
):
assert torch.allclose(original_model_feature, our_model_feature, atol=1e-3), (
"The backbone features are not the same."
)
original_model_pixel_out = original_model.sem_seg_head.pixel_decoder.forward_features(
original_model_backbone_features
)
assert torch.allclose(
original_model_pixel_out[0], our_model_output.pixel_decoder_last_hidden_state, atol=1e-4
), "The pixel decoder feature are not the same"
# let's test the full model
original_model_out = original_model([{"image": x.squeeze(0)}])
original_segmentation = original_model_out[0]["sem_seg"]
our_model_out: MaskFormerForInstanceSegmentationOutput = our_model(x)
our_segmentation = image_processor.post_process_segmentation(our_model_out, target_size=(384, 384))
assert torch.allclose(original_segmentation, our_segmentation, atol=1e-3), (
"The segmentation image is not the same."
)
logger.info("✅ Test passed!")
def get_name(checkpoint_file: Path):
model_name_raw: str = checkpoint_file.stem
# model_name_raw is something like maskformer_panoptic_swin_base_IN21k_384_bs64_554k
parent_name: str = checkpoint_file.parents[0].stem
backbone = "swin"
dataset = ""
if "coco" in parent_name:
dataset = "coco"
elif "ade" in parent_name:
dataset = "ade"
else:
raise ValueError(f"{parent_name} must be wrong since we didn't find 'coco' or 'ade' in it ")
backbone_types = ["tiny", "small", "base", "large"]
backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0]
model_name = f"maskformer-{backbone}-{backbone_type}-{dataset}"
return model_name
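# A minimal usage sketch for `get_name` (the path below is hypothetical; only the dataset substring
# in the parent directory name and the backbone size in the checkpoint stem influence the result).
def _example_get_name() -> str:
    # returns "maskformer-swin-base-coco"
    return get_name(Path("checkpoints/maskformer_swin_coco/maskformer_swin_base_IN21k_384_bs64_554k.pkl"))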
if __name__ == "__main__":
parser = ArgumentParser(
description="Command line to convert the original maskformers (with swin backbone) to our implementations."
)
parser.add_argument(
"--checkpoints_dir",
type=Path,
help=(
"A directory containing the model's checkpoints. The directory has to have the following structure:"
" <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pkl\n"
"Given the files are in the pickle format, please be wary of passing it files you trust."
),
)
parser.add_argument(
"--configs_dir",
type=Path,
help=(
"A directory containing the model's configs, see detectron2 doc. The directory has to have the following"
" structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.yaml"
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
required=True,
type=Path,
help="Path to the folder to output PyTorch models.",
)
parser.add_argument(
"--maskformer_dir",
required=True,
type=Path,
help=(
"A path to MaskFormer's original implementation directory. You can download from here:"
" https://github.com/facebookresearch/MaskFormer"
),
)
args = parser.parse_args()
checkpoints_dir: Path = args.checkpoints_dir
config_dir: Path = args.configs_dir
save_directory: Path = args.pytorch_dump_folder_path
maskformer_dir: Path = args.maskformer_dir
    # add the parent of the MaskFormer directory to sys.path so we can import the original implementation
sys.path.append(str(maskformer_dir.parent))
# and import what's needed
from MaskFormer.mask_former import add_mask_former_config
from MaskFormer.mask_former.mask_former_model import MaskFormer as OriginalMaskFormer
if not save_directory.exists():
save_directory.mkdir(parents=True)
for config_file, checkpoint_file in OriginalMaskFormerCheckpointToOursConverter.using_dirs(
checkpoints_dir, config_dir
):
        original_config = setup_cfg(Args(config_file=config_file))
        image_processor = OriginalMaskFormerConfigToImageProcessorConverter()(original_config)
mask_former_kwargs = OriginalMaskFormer.from_config(original_config)
original_model = OriginalMaskFormer(**mask_former_kwargs).eval()
DetectionCheckpointer(original_model).load(str(checkpoint_file))
config: MaskFormerConfig = OriginalMaskFormerConfigToOursConverter()(original_config)
mask_former = MaskFormerModel(config=config).eval()
converter = OriginalMaskFormerCheckpointToOursConverter(original_model, config)
        mask_former = converter.convert(mask_former)
mask_former_for_instance_segmentation = MaskFormerForInstanceSegmentation(config=config).eval()
mask_former_for_instance_segmentation.model = mask_former
mask_former_for_instance_segmentation = converter.convert_instance_segmentation(
mask_former_for_instance_segmentation
)
test(original_model, mask_former_for_instance_segmentation, image_processor)
model_name = get_name(checkpoint_file)
logger.info(f"🪄 Saving {model_name}")
image_processor.save_pretrained(save_directory / model_name)
mask_former_for_instance_segmentation.save_pretrained(save_directory / model_name)
image_processor.push_to_hub(
repo_path_or_name=save_directory / model_name,
commit_message="Add model",
use_temp_dir=True,
)
mask_former_for_instance_segmentation.push_to_hub(
repo_path_or_name=save_directory / model_name,
commit_message="Add model",
use_temp_dir=True,
)
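# A minimal command-line sketch for this script (all paths below are hypothetical):
#
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoints_dir /path/to/checkpoints \
#       --configs_dir /path/to/configs \
#       --pytorch_dump_folder_path ./converted \
#       --maskformer_dir /path/to/MaskFormer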
| transformers/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 16135
} | 511 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MGP-STR model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class MgpstrConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of an [`MgpstrModel`]. It is used to instantiate an
MGP-STR model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the MGP-STR
[alibaba-damo/mgp-str-base](https://huggingface.co/alibaba-damo/mgp-str-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`list[int]`, *optional*, defaults to `[32, 128]`):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
max_token_length (`int`, *optional*, defaults to 27):
The max number of output tokens.
        num_character_labels (`int`, *optional*, defaults to 38):
            The number of classes for the character head.
        num_bpe_labels (`int`, *optional*, defaults to 50257):
            The number of classes for the BPE head.
        num_wordpiece_labels (`int`, *optional*, defaults to 30522):
            The number of classes for the wordpiece head.
hidden_size (`int`, *optional*, defaults to 768):
The embedding dimension.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 4.0):
The ratio of mlp hidden dim to embedding dim.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
distilled (`bool`, *optional*, defaults to `False`):
            Whether the model includes a distillation token and head, as in DeiT models.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
drop_rate (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder.
attn_drop_rate (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The stochastic depth rate.
output_a3_attentions (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return A^3 module attentions.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import MgpstrConfig, MgpstrForSceneTextRecognition
>>> # Initializing a Mgpstr mgp-str-base style configuration
>>> configuration = MgpstrConfig()
>>> # Initializing a model (with random weights) from the mgp-str-base style configuration
>>> model = MgpstrForSceneTextRecognition(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mgp-str"
def __init__(
self,
image_size=[32, 128],
patch_size=4,
num_channels=3,
max_token_length=27,
num_character_labels=38,
num_bpe_labels=50257,
num_wordpiece_labels=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
distilled=False,
layer_norm_eps=1e-5,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
output_a3_attentions=False,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.max_token_length = max_token_length
self.num_character_labels = num_character_labels
self.num_bpe_labels = num_bpe_labels
self.num_wordpiece_labels = num_wordpiece_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.distilled = distilled
self.layer_norm_eps = layer_norm_eps
self.drop_rate = drop_rate
self.qkv_bias = qkv_bias
self.attn_drop_rate = attn_drop_rate
self.drop_path_rate = drop_path_rate
self.output_a3_attentions = output_a3_attentions
self.initializer_range = initializer_range
__all__ = ["MgpstrConfig"]
| transformers/src/transformers/models/mgp_str/configuration_mgp_str.py/0 | {
"file_path": "transformers/src/transformers/models/mgp_str/configuration_mgp_str.py",
"repo_id": "transformers",
"token_count": 2254
} | 512 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/mlcd/modular_mlcd.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_mlcd.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Union
import torch
import torch.nn as nn
from ...activations import ACT2FN
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, torch_int
from .configuration_mlcd import MLCDVisionConfig
class MLCDMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class MLCDRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, dim: int, theta: float = 10000.0) -> None:
super().__init__()
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
def forward(self, num_patches_height: int, num_patches_width: int) -> torch.Tensor:
"""
Calculate the Rotary Position Embedding (RoPE) for MLCDVisionModel based on the grid size.
Args:
num_patches_height (int): Number of patches in the height dimension.
num_patches_width (int): Number of patches in the width dimension.
Returns:
torch.Tensor: Rotary positional embeddings for the given grid size.
"""
# Generate position IDs for height and width dimensions
hpos_ids = (
torch.arange(num_patches_height, device=self.inv_freq.device).unsqueeze(1).expand(-1, num_patches_width)
)
wpos_ids = (
torch.arange(num_patches_width, device=self.inv_freq.device).unsqueeze(0).expand(num_patches_height, -1)
)
# Flatten and stack the position IDs
pos_ids = torch.stack([hpos_ids.flatten(), wpos_ids.flatten()], dim=-1)
# Generate the full rotary positional embeddings for the maximum grid size
max_grid_size = max(num_patches_height, num_patches_width)
seq = torch.arange(max_grid_size, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
rotary_pos_emb_full = torch.outer(seq, self.inv_freq)
# Select and flatten the embeddings based on the position IDs
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
return rotary_pos_emb
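# A minimal shape sketch for the rotary table above (sizes below are hypothetical): with `dim=32`
# the module keeps dim // 2 = 16 inverse frequencies, and a 16 x 16 patch grid yields a table of
# shape (256, 32): for every patch, 16 height-frequency angles followed by 16 width-frequency angles.
def _mlcd_rotary_shape_sketch() -> torch.Size:
    rotary = MLCDRotaryEmbedding(dim=32)
    return rotary(num_patches_height=16, num_patches_width=16).shape  # torch.Size([256, 32])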
class MLCDVisionEmbeddings(nn.Module):
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embedding.weight.dtype
        # patch_embeds -> shape = [batch_size, embed_dim, grid, grid]
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
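# A minimal sketch of `rotate_half` on a 4-dimensional vector: [x1, x2, x3, x4] -> [-x3, -x4, x1, x2].
def _rotate_half_sketch() -> torch.Tensor:
    return rotate_half(torch.tensor([1.0, 2.0, 3.0, 4.0]))  # tensor([-3., -4., 1., 2.])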
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
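# A minimal shape sketch of `repeat_kv` (sizes below are hypothetical): 2 key/value heads repeated
# 3 times behave like 6 attention heads for the subsequent matmul with the query states.
def _repeat_kv_shape_sketch() -> torch.Size:
    key_value_states = torch.randn(1, 2, 10, 8)  # (batch, num_key_value_heads, seq_len, head_dim)
    return repeat_kv(key_value_states, n_rep=3).shape  # torch.Size([1, 6, 10, 8])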
def apply_rotary_pos_emb_vision(
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
orig_q_dtype = q.dtype
orig_k_dtype = k.dtype
q, k = q.float(), k.float()
cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
q_embed = q_embed.to(orig_q_dtype)
k_embed = k_embed.to(orig_k_dtype)
return q_embed, k_embed
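# A minimal shape sketch of `apply_rotary_pos_emb_vision` (sizes below are hypothetical): queries and
# keys of shape (batch, seq_len, num_heads, head_dim) are rotated by cos/sin tables of shape
# (1, seq_len, head_dim); the missing head axis is added inside the function via `unsqueeze(-2)`.
def _apply_rotary_shape_sketch() -> tuple[torch.Size, torch.Size]:
    q = torch.randn(2, 257, 4, 64)
    k = torch.randn(2, 257, 4, 64)
    cos, sin = torch.randn(1, 257, 64), torch.randn(1, 257, 64)
    q_embed, k_embed = apply_rotary_pos_emb_vision(q, k, cos, sin)
    return q_embed.shape, k_embed.shape  # both torch.Size([2, 257, 4, 64])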
class MLCDAttention(nn.Module):
"""Multi-headed attention with RoPE. Refer to papers:
- Attention is all you need:
https://huggingface.co/papers/1706.03762
- RoFormer: Enhanced Transformer with Rotary Position Embedding:
https://huggingface.co/papers/2104.09864
"""
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.num_key_value_groups = config.num_key_value_groups
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length = hidden_states.shape[:-1]
# Each of shape: [batch_size, seq_length, num_heads, head_dim]
query_states = self.q_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
key_states = self.k_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
value_states = self.v_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
# Apply positional embeddings
cos = position_embeddings[0].unsqueeze(0).float()
sin = position_embeddings[1].unsqueeze(0).float()
query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
# Each of shape: [batch_size, num_heads, seq_length, head_dim]
query_states = query_states.permute(0, 2, 1, 3).contiguous()
key_states = key_states.permute(0, 2, 1, 3).contiguous()
value_states = value_states.permute(0, 2, 1, 3).contiguous()
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scale,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.permute(1, 0, 2, 3).contiguous() # [seq_length, batch_size, num_heads, head_dim]
attn_output = attn_output.view(seq_length, batch_size, -1) # [seq_length, batch_size, embedding_dim]
attn_output = self.out_proj(attn_output)
attn_output = attn_output.permute(1, 0, 2).contiguous() # [batch_size, seq_length, embedding_dim]
return attn_output, attn_weights
class MLCDEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = MLCDAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = MLCDMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`.
Represents the hidden states from the previous layer or the input embeddings.
position_embeddings (`tuple[torch.Tensor, torch.Tensor]`):
                A tuple of two tensors (the cosine and sine of the rotary angles), each of shape `(seq_len, head_dim)`.
                Rotary positional embeddings applied to the queries and keys in the attention mechanism.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class MLCDEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`MLCDEncoderLayer`].
Args:
config: MLCDVisionConfig
"""
def __init__(self, config: MLCDVisionConfig):
"""Overwrite dummy `MLCDConfig` to `MLCDVisionConfig`."""
super().__init__()
self.config = config
self.layers = nn.ModuleList([MLCDEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds: torch.FloatTensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
position_embeddings (`tuple[torch.Tensor, torch.Tensor]`):
                A tuple of two tensors (the cosine and sine of the rotary angles), each of shape `(seq_len, head_dim)`.
                Rotary positional embeddings applied to the queries and keys in the attention mechanism.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_states,
attentions=all_attentions,
)
class MLCDVisionTransformer(nn.Module):
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = MLCDVisionEmbeddings(config)
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = MLCDEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.vision_rotary_embedding = MLCDRotaryEmbedding(config.hidden_size // config.num_attention_heads // 2)
self.class_pos_emb = nn.Parameter(torch.randn(1, config.hidden_size // config.num_attention_heads // 2))
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
num_patches_height = pixel_values.shape[-2] // self.config.patch_size
num_patches_width = pixel_values.shape[-1] // self.config.patch_size
rotary_pos_emb = self.vision_rotary_embedding(num_patches_height, num_patches_width)
rotary_pos_emb = rotary_pos_emb.to(self.class_pos_emb.device)
rotary_pos_emb = torch.cat([self.class_pos_emb, rotary_pos_emb], dim=0)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
hidden_states = self.embeddings(pixel_values)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring
class MLCDPreTrainedModel(PreTrainedModel):
config: MLCDVisionConfig
base_model_prefix = "mlcd"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, MLCDVisionEmbeddings):
factor = self.config.initializer_factor
nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
elif isinstance(module, MLCDAttention):
factor = self.config.initializer_factor
in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
out_proj_std = (module.embed_dim**-0.5) * factor
nn.init.normal_(module.q_proj.weight, std=in_proj_std)
nn.init.normal_(module.k_proj.weight, std=in_proj_std)
nn.init.normal_(module.v_proj.weight, std=in_proj_std)
nn.init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, MLCDMLP):
factor = self.config.initializer_factor
in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
nn.init.normal_(module.fc1.weight, std=fc_std)
nn.init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, MLCDVisionTransformer):
factor = self.config.initializer_factor
pos_emb_std = (module.config.hidden_size // module.config.num_attention_heads // 2) ** -0.5 * factor
nn.init.normal_(module.class_pos_emb, mean=0.0, std=pos_emb_std)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@auto_docstring(
custom_intro="""
    The vision model from MLCD without any head or projection on top.
"""
)
class MLCDVisionModel(MLCDPreTrainedModel):
config: MLCDVisionConfig
main_input_name = "pixel_values"
_no_split_modules = ["MLCDEncoderLayer"]
def __init__(self, config: MLCDVisionConfig):
super().__init__(config)
self.vision_model = MLCDVisionTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
Example:
```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoProcessor, MLCDVisionModel
>>> model = MLCDVisionModel.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-448")
>>> processor = AutoProcessor.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-448")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs, output_attentions=True)
>>> features = outputs.last_hidden_state
>>> print(f"Extracted features shape: {features.shape}")
>>> print(f"Number of attention layers: {len(outputs.attentions)}")
>>> print(f"Attention shape: {outputs.attentions[0].shape}")
```"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
return self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
__all__ = ["MLCDPreTrainedModel", "MLCDVisionModel"]
| transformers/src/transformers/models/mlcd/modeling_mlcd.py/0 | {
"file_path": "transformers/src/transformers/models/mlcd/modeling_mlcd.py",
"repo_id": "transformers",
"token_count": 11775
} | 513 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert MobileNetV2 checkpoints from the tensorflow/models library."""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetV2Config,
MobileNetV2ForImageClassification,
MobileNetV2ForSemanticSegmentation,
MobileNetV2ImageProcessor,
load_tf_weights_in_mobilenet_v2,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v2_config(model_name):
config = MobileNetV2Config(layer_norm_eps=0.001)
if "quant" in model_name:
raise ValueError("Quantized models are not supported.")
matches = re.match(r"^.*mobilenet_v2_([^_]*)_([^_]*)$", model_name)
if matches:
config.depth_multiplier = float(matches[1])
config.image_size = int(matches[2])
if model_name.startswith("deeplabv3_"):
config.output_stride = 8
config.num_labels = 21
filename = "pascal-voc-id2label.json"
else:
# The TensorFlow version of MobileNetV2 predicts 1001 classes instead
# of the usual 1000. The first class (index 0) is "background".
config.num_labels = 1001
filename = "imagenet-1k-id2label.json"
repo_id = "huggingface/label-files"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
if config.num_labels == 1001:
id2label = {int(k) + 1: v for k, v in id2label.items()}
id2label[0] = "background"
else:
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
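# A minimal sketch of how the model name is parsed above (the label-file download is omitted here;
# the default name below is one of the officially released variants):
def _parse_mobilenet_v2_name(model_name: str = "mobilenet_v2_0.75_160") -> tuple[float, int]:
    matches = re.match(r"^.*mobilenet_v2_([^_]*)_([^_]*)$", model_name)
    return float(matches[1]), int(matches[2])  # (0.75, 160) -> depth_multiplier, image_size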
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_mobilenet_v2_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
"""
Copy/paste/tweak model's weights to our MobileNetV2 structure.
"""
config = get_mobilenet_v2_config(model_name)
# Load 🤗 model
if model_name.startswith("deeplabv3_"):
model = MobileNetV2ForSemanticSegmentation(config).eval()
else:
model = MobileNetV2ForImageClassification(config).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_v2(model, config, checkpoint_path)
# Check outputs on an image, prepared by MobileNetV2ImageProcessor
image_processor = MobileNetV2ImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size},
size={"shortest_edge": config.image_size + 32},
)
encoding = image_processor(images=prepare_img(), return_tensors="pt")
outputs = model(**encoding)
logits = outputs.logits
if model_name.startswith("deeplabv3_"):
assert logits.shape == (1, 21, 65, 65)
if model_name == "deeplabv3_mobilenet_v2_1.0_513":
expected_logits = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
]
)
else:
raise ValueError(f"Unknown model name: {model_name}")
assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v2_1.4_224":
expected_logits = torch.tensor([0.0181, -1.0015, 0.4688])
elif model_name == "mobilenet_v2_1.0_224":
expected_logits = torch.tensor([0.2445, -1.1993, 0.1905])
elif model_name == "mobilenet_v2_0.75_160":
expected_logits = torch.tensor([0.2482, 0.4136, 0.6669])
elif model_name == "mobilenet_v2_0.35_96":
expected_logits = torch.tensor([0.1451, -0.4624, 0.7192])
else:
expected_logits = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("Pushing to the hub...")
repo_id = "google/" + model_name
image_processor.push_to_hub(repo_id)
model.push_to_hub(repo_id)
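# A minimal command-line sketch for this script (paths below are hypothetical; --model_name defaults
# to "mobilenet_v2_1.0_224"):
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v2_1.0_224 \
#       --checkpoint_path /path/to/mobilenet_v2_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v2_1.0_224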
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v2_1.0_224",
type=str,
help="Name of the MobileNetV2 model you'd like to convert. Should in the form 'mobilenet_v2_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
    convert_mobilenet_v2_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| transformers/src/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 2730
} | 514 |
# coding=utf-8
# Copyright 2023 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE
"""PyTorch MobileViTV2 model."""
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
SemanticSegmenterOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from .configuration_mobilevitv2 import MobileViTV2Config
logger = logging.get_logger(__name__)
# Copied from transformers.models.mobilevit.modeling_mobilevit.make_divisible
def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
"""
Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
original TensorFlow repo. It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go more than 10% below the original value.
if new_value < 0.9 * value:
new_value += divisor
return int(new_value)
def clip(value: float, min_val: float = float("-inf"), max_val: float = float("inf")) -> float:
return max(min_val, min(max_val, value))
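# A minimal sketch of the two helpers above (values chosen for illustration): `make_divisible`
# rounds a channel count to a multiple of `divisor` without dropping more than 10% below the
# requested value, and `clip` bounds a float to a closed interval.
def _channel_rounding_sketch() -> tuple[int, int, float]:
    return (
        make_divisible(33),  # 32 (nearest multiple of 8)
        make_divisible(27),  # 32 (24 would be more than 10% below 27, so the value is bumped up)
        clip(1.3, min_val=0.5, max_val=1.0),  # 1.0
    )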
# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTConvLayer with MobileViT->MobileViTV2
class MobileViTV2ConvLayer(nn.Module):
def __init__(
self,
config: MobileViTV2Config,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
groups: int = 1,
bias: bool = False,
dilation: int = 1,
use_normalization: bool = True,
use_activation: Union[bool, str] = True,
) -> None:
super().__init__()
padding = int((kernel_size - 1) / 2) * dilation
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
self.convolution = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode="zeros",
)
if use_normalization:
self.normalization = nn.BatchNorm2d(
num_features=out_channels,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
)
else:
self.normalization = None
if use_activation:
if isinstance(use_activation, str):
self.activation = ACT2FN[use_activation]
elif isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
else:
self.activation = None
def forward(self, features: torch.Tensor) -> torch.Tensor:
features = self.convolution(features)
if self.normalization is not None:
features = self.normalization(features)
if self.activation is not None:
features = self.activation(features)
return features
# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTInvertedResidual with MobileViT->MobileViTV2
class MobileViTV2InvertedResidual(nn.Module):
"""
Inverted residual block (MobileNetv2): https://huggingface.co/papers/1801.04381
"""
def __init__(
self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1
) -> None:
super().__init__()
expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
if stride not in [1, 2]:
raise ValueError(f"Invalid stride {stride}.")
self.use_residual = (stride == 1) and (in_channels == out_channels)
self.expand_1x1 = MobileViTV2ConvLayer(
config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
)
self.conv_3x3 = MobileViTV2ConvLayer(
config,
in_channels=expanded_channels,
out_channels=expanded_channels,
kernel_size=3,
stride=stride,
groups=expanded_channels,
dilation=dilation,
)
self.reduce_1x1 = MobileViTV2ConvLayer(
config,
in_channels=expanded_channels,
out_channels=out_channels,
kernel_size=1,
use_activation=False,
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
residual = features
features = self.expand_1x1(features)
features = self.conv_3x3(features)
features = self.reduce_1x1(features)
return residual + features if self.use_residual else features
# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTMobileNetLayer with MobileViT->MobileViTV2
class MobileViTV2MobileNetLayer(nn.Module):
def __init__(
self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
) -> None:
super().__init__()
self.layer = nn.ModuleList()
for i in range(num_stages):
layer = MobileViTV2InvertedResidual(
config,
in_channels=in_channels,
out_channels=out_channels,
stride=stride if i == 0 else 1,
)
self.layer.append(layer)
in_channels = out_channels
def forward(self, features: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
features = layer_module(features)
return features
class MobileViTV2LinearSelfAttention(nn.Module):
"""
This layer applies a self-attention with linear complexity, as described in MobileViTV2 paper:
https://huggingface.co/papers/2206.02680
Args:
config (`MobileVitv2Config`):
Model configuration object
embed_dim (`int`):
`input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)`
"""
def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None:
super().__init__()
self.qkv_proj = MobileViTV2ConvLayer(
config=config,
in_channels=embed_dim,
out_channels=1 + (2 * embed_dim),
bias=True,
kernel_size=1,
use_normalization=False,
use_activation=False,
)
self.attn_dropout = nn.Dropout(p=config.attn_dropout)
self.out_proj = MobileViTV2ConvLayer(
config=config,
in_channels=embed_dim,
out_channels=embed_dim,
bias=True,
kernel_size=1,
use_normalization=False,
use_activation=False,
)
self.embed_dim = embed_dim
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# (batch_size, embed_dim, num_pixels_in_patch, num_patches) --> (batch_size, 1+2*embed_dim, num_pixels_in_patch, num_patches)
qkv = self.qkv_proj(hidden_states)
# Project hidden_states into query, key and value
# Query --> [batch_size, 1, num_pixels_in_patch, num_patches]
# value, key --> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
query, key, value = torch.split(qkv, split_size_or_sections=[1, self.embed_dim, self.embed_dim], dim=1)
# apply softmax along num_patches dimension
context_scores = torch.nn.functional.softmax(query, dim=-1)
context_scores = self.attn_dropout(context_scores)
# Compute context vector
# [batch_size, embed_dim, num_pixels_in_patch, num_patches] x [batch_size, 1, num_pixels_in_patch, num_patches] -> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
context_vector = key * context_scores
# [batch_size, embed_dim, num_pixels_in_patch, num_patches] --> [batch_size, embed_dim, num_pixels_in_patch, 1]
context_vector = torch.sum(context_vector, dim=-1, keepdim=True)
# combine context vector with values
# [batch_size, embed_dim, num_pixels_in_patch, num_patches] * [batch_size, embed_dim, num_pixels_in_patch, 1] --> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
out = torch.nn.functional.relu(value) * context_vector.expand_as(value)
out = self.out_proj(out)
return out
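# A minimal tensor-level sketch of the separable attention above (sizes below are hypothetical and
# plain tensors stand in for the conv projections): the context scores come from a single-channel
# query, so the context vector is one weighted sum over the patch axis and the cost grows linearly
# with the number of patches instead of quadratically.
def _linear_attention_sketch() -> torch.Size:
    embed_dim, pixels_per_patch, num_patches = 64, 4, 256
    query = torch.randn(1, 1, pixels_per_patch, num_patches)
    key = torch.randn(1, embed_dim, pixels_per_patch, num_patches)
    value = torch.randn(1, embed_dim, pixels_per_patch, num_patches)
    context_scores = torch.nn.functional.softmax(query, dim=-1)
    context_vector = (key * context_scores).sum(dim=-1, keepdim=True)
    out = torch.nn.functional.relu(value) * context_vector.expand_as(value)
    return out.shape  # torch.Size([1, 64, 4, 256])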
class MobileViTV2FFN(nn.Module):
def __init__(
self,
config: MobileViTV2Config,
embed_dim: int,
ffn_latent_dim: int,
ffn_dropout: float = 0.0,
) -> None:
super().__init__()
self.conv1 = MobileViTV2ConvLayer(
config=config,
in_channels=embed_dim,
out_channels=ffn_latent_dim,
kernel_size=1,
stride=1,
bias=True,
use_normalization=False,
use_activation=True,
)
self.dropout1 = nn.Dropout(ffn_dropout)
self.conv2 = MobileViTV2ConvLayer(
config=config,
in_channels=ffn_latent_dim,
out_channels=embed_dim,
kernel_size=1,
stride=1,
bias=True,
use_normalization=False,
use_activation=False,
)
self.dropout2 = nn.Dropout(ffn_dropout)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.conv1(hidden_states)
hidden_states = self.dropout1(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.dropout2(hidden_states)
return hidden_states
class MobileViTV2TransformerLayer(nn.Module):
def __init__(
self,
config: MobileViTV2Config,
embed_dim: int,
ffn_latent_dim: int,
dropout: float = 0.0,
) -> None:
super().__init__()
self.layernorm_before = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
self.attention = MobileViTV2LinearSelfAttention(config, embed_dim)
self.dropout1 = nn.Dropout(p=dropout)
self.layernorm_after = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
self.ffn = MobileViTV2FFN(config, embed_dim, ffn_latent_dim, config.ffn_dropout)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
layernorm_1_out = self.layernorm_before(hidden_states)
attention_output = self.attention(layernorm_1_out)
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.ffn(layer_output)
layer_output = layer_output + hidden_states
return layer_output
class MobileViTV2Transformer(nn.Module):
def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None:
super().__init__()
ffn_multiplier = config.ffn_multiplier
ffn_dims = [ffn_multiplier * d_model] * n_layers
# ensure that dims are multiple of 16
ffn_dims = [int((d // 16) * 16) for d in ffn_dims]
self.layer = nn.ModuleList()
for block_idx in range(n_layers):
transformer_layer = MobileViTV2TransformerLayer(
config, embed_dim=d_model, ffn_latent_dim=ffn_dims[block_idx]
)
self.layer.append(transformer_layer)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
hidden_states = layer_module(hidden_states)
return hidden_states
class MobileViTV2Layer(GradientCheckpointingLayer):
"""
MobileViTV2 layer: https://huggingface.co/papers/2206.02680
"""
def __init__(
self,
config: MobileViTV2Config,
in_channels: int,
out_channels: int,
attn_unit_dim: int,
n_attn_blocks: int = 2,
dilation: int = 1,
stride: int = 2,
) -> None:
super().__init__()
self.patch_width = config.patch_size
self.patch_height = config.patch_size
cnn_out_dim = attn_unit_dim
if stride == 2:
self.downsampling_layer = MobileViTV2InvertedResidual(
config,
in_channels=in_channels,
out_channels=out_channels,
stride=stride if dilation == 1 else 1,
dilation=dilation // 2 if dilation > 1 else 1,
)
in_channels = out_channels
else:
self.downsampling_layer = None
# Local representations
self.conv_kxk = MobileViTV2ConvLayer(
config,
in_channels=in_channels,
out_channels=in_channels,
kernel_size=config.conv_kernel_size,
groups=in_channels,
)
self.conv_1x1 = MobileViTV2ConvLayer(
config,
in_channels=in_channels,
out_channels=cnn_out_dim,
kernel_size=1,
use_normalization=False,
use_activation=False,
)
# Global representations
self.transformer = MobileViTV2Transformer(config, d_model=attn_unit_dim, n_layers=n_attn_blocks)
        # GroupNorm with a single group plays the role of the LayerNorm2D used in the original implementation
        self.layernorm = nn.GroupNorm(num_groups=1, num_channels=attn_unit_dim, eps=config.layer_norm_eps)
# Fusion
self.conv_projection = MobileViTV2ConvLayer(
config,
in_channels=cnn_out_dim,
out_channels=in_channels,
kernel_size=1,
use_normalization=True,
use_activation=False,
)
def unfolding(self, feature_map: torch.Tensor) -> tuple[torch.Tensor, tuple[int, int]]:
batch_size, in_channels, img_height, img_width = feature_map.shape
patches = nn.functional.unfold(
feature_map,
kernel_size=(self.patch_height, self.patch_width),
stride=(self.patch_height, self.patch_width),
)
patches = patches.reshape(batch_size, in_channels, self.patch_height * self.patch_width, -1)
return patches, (img_height, img_width)
def folding(self, patches: torch.Tensor, output_size: tuple[int, int]) -> torch.Tensor:
batch_size, in_dim, patch_size, n_patches = patches.shape
patches = patches.reshape(batch_size, in_dim * patch_size, n_patches)
feature_map = nn.functional.fold(
patches,
output_size=output_size,
kernel_size=(self.patch_height, self.patch_width),
stride=(self.patch_height, self.patch_width),
)
return feature_map
def forward(self, features: torch.Tensor) -> torch.Tensor:
# reduce spatial dimensions if needed
if self.downsampling_layer:
features = self.downsampling_layer(features)
# local representation
features = self.conv_kxk(features)
features = self.conv_1x1(features)
# convert feature map to patches
patches, output_size = self.unfolding(features)
# learn global representations
patches = self.transformer(patches)
patches = self.layernorm(patches)
# convert patches back to feature maps
        # [batch_size, attn_unit_dim, pixels_per_patch, num_patches] --> [batch_size, attn_unit_dim, img_height, img_width]
features = self.folding(patches, output_size)
features = self.conv_projection(features)
return features
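# A minimal shape sketch of the unfolding/folding pair used above (sizes below are hypothetical):
# with a patch size of 2, a (1, 64, 32, 32) feature map is unfolded into (1, 64, 4, 256) patches
# (4 pixels per patch, 256 patches) and folded back to the original spatial resolution.
def _unfold_fold_shape_sketch() -> tuple[torch.Size, torch.Size]:
    feature_map = torch.randn(1, 64, 32, 32)
    patches = nn.functional.unfold(feature_map, kernel_size=(2, 2), stride=(2, 2))
    patches = patches.reshape(1, 64, 2 * 2, -1)  # (1, 64, 4, 256)
    folded = nn.functional.fold(
        patches.reshape(1, 64 * 2 * 2, -1), output_size=(32, 32), kernel_size=(2, 2), stride=(2, 2)
    )
    return patches.shape, folded.shape  # (torch.Size([1, 64, 4, 256]), torch.Size([1, 64, 32, 32]))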
class MobileViTV2Encoder(nn.Module):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList()
self.gradient_checkpointing = False
# segmentation architectures like DeepLab and PSPNet modify the strides
# of the classification backbones
dilate_layer_4 = dilate_layer_5 = False
if config.output_stride == 8:
dilate_layer_4 = True
dilate_layer_5 = True
elif config.output_stride == 16:
dilate_layer_5 = True
dilation = 1
layer_0_dim = make_divisible(
clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16
)
layer_1_dim = make_divisible(64 * config.width_multiplier, divisor=16)
layer_2_dim = make_divisible(128 * config.width_multiplier, divisor=8)
layer_3_dim = make_divisible(256 * config.width_multiplier, divisor=8)
layer_4_dim = make_divisible(384 * config.width_multiplier, divisor=8)
layer_5_dim = make_divisible(512 * config.width_multiplier, divisor=8)
layer_1 = MobileViTV2MobileNetLayer(
config,
in_channels=layer_0_dim,
out_channels=layer_1_dim,
stride=1,
num_stages=1,
)
self.layer.append(layer_1)
layer_2 = MobileViTV2MobileNetLayer(
config,
in_channels=layer_1_dim,
out_channels=layer_2_dim,
stride=2,
num_stages=2,
)
self.layer.append(layer_2)
layer_3 = MobileViTV2Layer(
config,
in_channels=layer_2_dim,
out_channels=layer_3_dim,
attn_unit_dim=make_divisible(config.base_attn_unit_dims[0] * config.width_multiplier, divisor=8),
n_attn_blocks=config.n_attn_blocks[0],
)
self.layer.append(layer_3)
if dilate_layer_4:
dilation *= 2
layer_4 = MobileViTV2Layer(
config,
in_channels=layer_3_dim,
out_channels=layer_4_dim,
attn_unit_dim=make_divisible(config.base_attn_unit_dims[1] * config.width_multiplier, divisor=8),
n_attn_blocks=config.n_attn_blocks[1],
dilation=dilation,
)
self.layer.append(layer_4)
if dilate_layer_5:
dilation *= 2
layer_5 = MobileViTV2Layer(
config,
in_channels=layer_4_dim,
out_channels=layer_5_dim,
attn_unit_dim=make_divisible(config.base_attn_unit_dims[2] * config.width_multiplier, divisor=8),
n_attn_blocks=config.n_attn_blocks[2],
dilation=dilation,
)
self.layer.append(layer_5)
def forward(
self,
hidden_states: torch.Tensor,
output_hidden_states: bool = False,
return_dict: bool = True,
) -> Union[tuple, BaseModelOutputWithNoAttention]:
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
@auto_docstring
class MobileViTV2PreTrainedModel(PreTrainedModel):
config: MobileViTV2Config
base_model_prefix = "mobilevitv2"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["MobileViTV2Layer"]
def _init_weights(self, module: nn.Module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.GroupNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@auto_docstring
class MobileViTV2Model(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config, expand_output: bool = True):
r"""
expand_output (`bool`, *optional*, defaults to `True`):
Whether to expand the output of the model. If `True`, the model will output pooled features in addition to
hidden states. If `False`, only the hidden states will be returned.
"""
super().__init__(config)
self.config = config
self.expand_output = expand_output
layer_0_dim = make_divisible(
clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16
)
self.conv_stem = MobileViTV2ConvLayer(
config,
in_channels=config.num_channels,
out_channels=layer_0_dim,
kernel_size=3,
stride=2,
use_normalization=True,
use_activation=True,
)
self.encoder = MobileViTV2Encoder(config)
# Initialize weights and apply final processing
self.post_init()
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class `PreTrainedModel`.
"""
for layer_index, heads in heads_to_prune.items():
mobilevitv2_layer = self.encoder.layer[layer_index]
if isinstance(mobilevitv2_layer, MobileViTV2Layer):
for transformer_layer in mobilevitv2_layer.transformer.layer:
transformer_layer.attention.prune_heads(heads)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.conv_stem(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.expand_output:
last_hidden_state = encoder_outputs[0]
# global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
else:
last_hidden_state = encoder_outputs[0]
pooled_output = None
if not return_dict:
output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
return output + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
MobileViTV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevitv2 = MobileViTV2Model(config)
out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension
# Classifier head
self.classifier = (
nn.Linear(in_features=out_channels, out_features=config.num_labels)
if config.num_labels > 0
else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilevitv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
)
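# Usage sketch (illustrative; `image` is any PIL image and the checkpoint name is an assumption):
# >>> import torch
# >>> from transformers import AutoImageProcessor, MobileViTV2ForImageClassification
# >>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
# >>> model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
# >>> inputs = image_processor(images=image, return_tensors="pt")
# >>> with torch.no_grad():
# ...     logits = model(**inputs).logits
# >>> predicted_class_idx = logits.argmax(-1).item()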
# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTASPPPooling with MobileViT->MobileViTV2
class MobileViTV2ASPPPooling(nn.Module):
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None:
super().__init__()
self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.conv_1x1 = MobileViTV2ConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
use_normalization=True,
use_activation="relu",
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
spatial_size = features.shape[-2:]
features = self.global_pool(features)
features = self.conv_1x1(features)
features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False)
return features
class MobileViTV2ASPP(nn.Module):
"""
ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
encoder_out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension
in_channels = encoder_out_channels
out_channels = config.aspp_out_channels
if len(config.atrous_rates) != 3:
raise ValueError("Expected 3 values for atrous_rates")
self.convs = nn.ModuleList()
in_projection = MobileViTV2ConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
use_activation="relu",
)
self.convs.append(in_projection)
self.convs.extend(
[
MobileViTV2ConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
dilation=rate,
use_activation="relu",
)
for rate in config.atrous_rates
]
)
pool_layer = MobileViTV2ASPPPooling(config, in_channels, out_channels)
self.convs.append(pool_layer)
self.project = MobileViTV2ConvLayer(
config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
)
self.dropout = nn.Dropout(p=config.aspp_dropout_prob)
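# Note: `self.convs` now holds five parallel branches (the 1x1 projection, three dilated 3x3
# convolutions and the global-pooling branch), so their channel-wise concatenation has
# 5 * out_channels channels, matching the `in_channels` of `self.project` above.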
def forward(self, features: torch.Tensor) -> torch.Tensor:
pyramid = []
for conv in self.convs:
pyramid.append(conv(features))
pyramid = torch.cat(pyramid, dim=1)
pooled_features = self.project(pyramid)
pooled_features = self.dropout(pooled_features)
return pooled_features
# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTDeepLabV3 with MobileViT->MobileViTV2
class MobileViTV2DeepLabV3(nn.Module):
"""
DeepLabv3 architecture: https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
self.aspp = MobileViTV2ASPP(config)
self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
self.classifier = MobileViTV2ConvLayer(
config,
in_channels=config.aspp_out_channels,
out_channels=config.num_labels,
kernel_size=1,
use_normalization=False,
use_activation=False,
bias=True,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
features = self.aspp(hidden_states[-1])
features = self.dropout(features)
features = self.classifier(features)
return features
@auto_docstring(
custom_intro="""
MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.
"""
)
class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevitv2 = MobileViTV2Model(config, expand_output=False)
self.segmentation_head = MobileViTV2DeepLabV3(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SemanticSegmenterOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, MobileViTV2ForSemanticSegmentation
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
>>> model = MobileViTV2ForSemanticSegmentation.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one")
outputs = self.mobilevitv2(
pixel_values,
output_hidden_states=True, # we need the intermediate hidden states
return_dict=return_dict,
)
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
logits = self.segmentation_head(encoder_hidden_states)
loss = None
if labels is not None:
# upsample logits to the images' original size
upsampled_logits = nn.functional.interpolate(
logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
)
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
loss = loss_fct(upsampled_logits, labels)
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
__all__ = [
"MobileViTV2ForImageClassification",
"MobileViTV2ForSemanticSegmentation",
"MobileViTV2Model",
"MobileViTV2PreTrainedModel",
]
| transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py/0 | {
"file_path": "transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py",
"repo_id": "transformers",
"token_count": 16140
} | 515 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Moshi checkpoints."""
import argparse
import safetensors
import sentencepiece
import torch
from transformers import (
AutoFeatureExtractor,
GenerationConfig,
MimiModel, # initial audio encoder
MoshiConfig,
MoshiForConditionalGeneration,
PreTrainedTokenizerFast,
logging,
)
from transformers.convert_slow_tokenizer import MoshiConverter
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.mimi")
def assert_param_count(model_1, model_2):
count_1 = sum(p[1].numel() for p in model_1.named_parameters() if "final_proj" not in p[0])
count_2 = sum(p[1].numel() for p in model_2.named_parameters() if "final_proj" not in p[0])
assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}"
def param_count(model):
return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0])
def _grab_best_device(use_gpu=True):
if torch.cuda.device_count() > 0 and use_gpu:
device = "cuda"
else:
device = "cpu"
return torch.device(device)
convert_list = [
# GENERAL
("out_norm", "decoder.model.norm"),
("depformer_emb", "depth_decoder.emb"),
("depformer_text_emb", "depth_decoder.text_emb"),
("text_emb", "decoder.model.emb"),
("emb", "embed_tokens"),
("text_linear", "decoder.lm_head"),
("depformer", "depth_decoder"),
("transformer", "decoder.model"),
# TRANSFORMERS PART
("gating.linear_in", "mlp.fc1"),
("gating.linear_out", "mlp.fc2"),
("self_attn.out_proj", "self_attn.o_proj.linear"),
("norm1", "input_layernorm"),
("norm2", "post_attention_layernorm"),
("layer_scale_1", "self_attn_layer_scale"),
("layer_scale_2", "mlp_layer_scale"),
("alpha", "weight"),
]
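# Renaming sketch (illustrative): each original key is run through the substitutions above in
# order, e.g. the depth-transformer key "depformer.layers.0.norm1.weight" becomes
# "depth_decoder.layers.0.input_layernorm.weight" after the ("depformer", "depth_decoder") and
# ("norm1", "input_layernorm") rules are applied.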
def _preprocess_state_dict(state_dict, config):
# Moshi original weights are using a gating mechanism
# pattern for depth transformer:
# stack(gating.{i}.linear_in)->mlp.fc1
# stack(gating.{i}.linear_out)->mlp.fc2
for layer_idx in range(config.depth_decoder_config.num_hidden_layers):
linear_layers_in = [
state_dict.pop(f"depformer.layers.{layer_idx}.gating.{i}.linear_in.weight")
for i in range(config.num_codebooks)
]
linear_layers_out = [
state_dict.pop(f"depformer.layers.{layer_idx}.gating.{i}.linear_out.weight")
for i in range(config.num_codebooks)
]
state_dict[f"depth_decoder.layers.{layer_idx}.mlp.fc1.weight"] = torch.stack(linear_layers_in)
state_dict[f"depth_decoder.layers.{layer_idx}.mlp.fc2.weight"] = torch.stack(linear_layers_out)
input_projections = []
lm_heads = []
for codebook_idx in range(config.num_codebooks):
input_projections.append(state_dict.pop(f"depformer_in.{codebook_idx}.weight"))
lm_heads.append(state_dict.pop(f"linears.{codebook_idx}.weight"))
state_dict["depth_decoder.input_projections.weight"] = torch.stack(input_projections, dim=0)
state_dict["depth_decoder.lm_heads.weight"] = torch.stack(lm_heads, dim=0)
return state_dict
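# Stacking sketch (illustrative; num_codebooks=8 is an assumption): the per-codebook weights
# gating.{i}.linear_in, each of shape (out_features, in_features), are stacked into a single
# depth_decoder.layers.{layer_idx}.mlp.fc1.weight of shape (8, out_features, in_features);
# depformer_in.{i} and linears.{i} are stacked the same way into input_projections / lm_heads.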
def _convert_model(
state_dict,
hf_model,
convert_list,
device,
config,
unwanted_prefix=None,
):
hidden_size = config.hidden_size
head_dim = config.head_dim
num_heads = int(config.hidden_size // config.head_dim)
num_key_value_heads = config.num_key_value_heads
key_value_head_dim = config.num_key_value_heads * head_dim
state_dict = _preprocess_state_dict(state_dict, config)
# permute for sliced rotary
def permute(w, n_heads, dim1=hidden_size, dim2=hidden_size):
return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
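# Rotary permute sketch (illustrative): for a single head with head_dim=4, rows ordered
# [d0, d1, d2, d3] in the original interleaved layout become [d0, d2, d1, d3], i.e. even-indexed
# rotary dimensions are moved to the first half and odd-indexed ones to the second half, which is
# the layout the Transformers rotary embedding expects.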
for k, v in list(state_dict.items()):
if "audio_encoder" not in k:
new_k = k if unwanted_prefix is None else k[len(unwanted_prefix) :]
for old_layer_name, new_layer_name in convert_list:
if old_layer_name in new_k:
new_k = new_k.replace(old_layer_name, new_layer_name)
if "alpha" in k:
state_dict[k] = state_dict[k].squeeze()
if "in_proj_weight" in new_k:
# split qkv into query key and value
mixed_qkv = state_dict.pop(k)
if "depth_decoder" in new_k:
mixed_qkv = mixed_qkv.view(config.num_codebooks, -1, mixed_qkv.shape[-1])
qkv_dim = mixed_qkv.size(1) // 3
query_layer = mixed_qkv[:, :qkv_dim]
key_layer = mixed_qkv[:, qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[:, qkv_dim * 2 :]
state_dict[new_k.replace("in_proj_weight", "q_proj.linear.weight")] = query_layer
state_dict[new_k.replace("in_proj_weight", "k_proj.linear.weight")] = key_layer
else:
qkv_dim = mixed_qkv.size(0) // 3
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
state_dict[new_k.replace("in_proj_weight", "q_proj.linear.weight")] = permute(
query_layer, num_heads, hidden_size, hidden_size
)
state_dict[new_k.replace("in_proj_weight", "k_proj.linear.weight")] = permute(
key_layer, num_key_value_heads, key_value_head_dim, hidden_size
)
state_dict[new_k.replace("in_proj_weight", "v_proj.linear.weight")] = value_layer
elif "o_proj" in new_k and "depth_decoder" in new_k:
output_layer = state_dict.pop(k)
state_dict[new_k] = output_layer.view(config.num_codebooks, -1, output_layer.shape[-1])
else:
state_dict[new_k] = state_dict.pop(k)
# Do the last one by hand
state_dict["depth_decoder.text_embed_tokens.weight"] = state_dict.pop(
"depth_decoder.decoder.model.embed_tokens.weight"
)
extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys())
missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys())
if len(extra_keys) != 0:
raise ValueError(f"extra keys found: {extra_keys}")
if len(missing_keys) != 0:
raise ValueError(f"missing keys: {missing_keys}")
hf_model.load_state_dict(state_dict, strict=True)
n_params = param_count(hf_model)
logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params")
hf_model.eval()
hf_model.to(device)
del state_dict
return hf_model
@torch.no_grad()
def convert_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
mimi_repo_id,
config_path=None,
repo_id=None,
):
"""
Copy/paste/tweak model's weights to transformers design.
"""
device = _grab_best_device()
mimi_model = MimiModel.from_pretrained(mimi_repo_id, dtype=torch.bfloat16)
if config_path is not None:
config = MoshiConfig.from_pretrained(config_path)
else:
audio_encoder_config = mimi_model.config
config = MoshiConfig.from_audio_encoder_config(audio_encoder_config)
model = MoshiForConditionalGeneration(config).to(torch.bfloat16)
depth_decoder_generation_config = GenerationConfig(
do_sample=True,
temperature=0.8,
top_k=250,
min_length=config.num_codebooks + 1,
max_length=config.num_codebooks + 1,
cache_implementation="sliding_window",
)
generation_config = GenerationConfig(
do_sample=True,
temp=0.7,
top_k=25,
cache_implementation="sliding_window",
pad_token_id=config.vocab_size,
bos_token_id=config.vocab_size,
)
generation_config.depth_decoder_config = depth_decoder_generation_config.to_diff_dict()
model.generation_config = generation_config
original_checkpoint = safetensors.torch.load_file(checkpoint_path)
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
original_checkpoint = original_checkpoint["best_state"]
audio_checkpoint = mimi_model.state_dict()
original_checkpoint.update({f"audio_encoder.{key}": value for (key, value) in audio_checkpoint.items()})
model = _convert_model(original_checkpoint, model, convert_list, device, config)
model.save_pretrained(pytorch_dump_folder_path)
if repo_id:
print("Pushing to the hub...")
model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument(
"--tokenizer_vocab_path", required=False, default=None, type=str, help="Path to original tokenizer vocab file"
)
parser.add_argument("--mimi_repo_id", required=True, default=None, type=str, help="Repository id to HF Mimi.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
args = parser.parse_args()
# convert tokenizer
if args.tokenizer_vocab_path:
original_tokenizer = sentencepiece.SentencePieceProcessor(args.tokenizer_vocab_path)
tokenizer = MoshiConverter(args.tokenizer_vocab_path).converted()
tokenizer = PreTrainedTokenizerFast(
tokenizer_object=tokenizer,
chat_template=None,
unk_token="<unk>",
model_input_names=["input_ids", "attention_mask"],
clean_up_tokenization_spaces=False,
bos_token_id=original_tokenizer.bos_id(),
eos_token_id=original_tokenizer.eos_id(),
pad_token_id=original_tokenizer.pad_id(),
)
tokenizer.save_pretrained(args.pytorch_dump_folder_path)
if args.push_to_hub:
print("Pushing the tokenizer to the hub...")
tokenizer.push_to_hub(args.push_to_hub)
# upload feature extractor
feature_extractor = AutoFeatureExtractor.from_pretrained(args.mimi_repo_id)
feature_extractor.save_pretrained(args.pytorch_dump_folder_path)
if args.push_to_hub:
print("Pushing the feature extractor to the hub...")
feature_extractor.push_to_hub(args.push_to_hub)
convert_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.mimi_repo_id,
args.config_path,
args.push_to_hub,
)
| transformers/src/transformers/models/moshi/convert_moshi_transformers.py/0 | {
"file_path": "transformers/src/transformers/models/moshi/convert_moshi_transformers.py",
"repo_id": "transformers",
"token_count": 5074
} | 516 |
# coding=utf-8
# Copyright 2020, The T5 Authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mT5 model configuration"""
from collections.abc import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
class MT5Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to
instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the mT5
[google/mt5-small](https://huggingface.co/google/mt5-small) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 250112):
Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`T5Model`] or [`TFT5Model`].
d_model (`int`, *optional*, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. Conventionally, `d_kv` is expected to equal `d_model // num_heads`,
but in the mt5-small architecture it does not. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`.
d_ff (`int`, *optional*, defaults to 1024):
Size of the intermediate feed forward layer in each `T5Block`.
num_layers (`int`, *optional*, defaults to 8):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
layer_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
"""
model_type = "mt5"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
"head_dim": "d_kv",
}
def __init__(
self,
vocab_size=250112,
d_model=512,
d_kv=64,
d_ff=1024,
num_layers=8,
num_decoder_layers=None,
num_heads=6,
relative_attention_num_buckets=32,
relative_attention_max_distance=128,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="gated-gelu",
is_encoder_decoder=True,
use_cache=True,
tokenizer_class="T5Tokenizer",
tie_word_embeddings=False,
pad_token_id=0,
eos_token_id=1,
decoder_start_token_id=0,
classifier_dropout=0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.classifier_dropout = classifier_dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split("-")
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == "gated"
if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'"
)
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
self.dense_act_fn = "gelu_new"
super().__init__(
is_encoder_decoder=is_encoder_decoder,
tokenizer_class=tokenizer_class,
tie_word_embeddings=tie_word_embeddings,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
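# Example (illustrative sketch):
# >>> from transformers import MT5Config, MT5Model
# >>> configuration = MT5Config()  # mt5-small style defaults
# >>> model = MT5Model(configuration)  # randomly initialized model following the configuration
# >>> configuration = model.config  # access the model configuration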
class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
common_inputs["decoder_input_ids"] = {0: "batch"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction="inputs")
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def default_onnx_opset(self) -> int:
return 13
@property
def atol_for_validation(self) -> float:
return 5e-4
__all__ = ["MT5Config", "MT5OnnxConfig"]
| transformers/src/transformers/models/mt5/configuration_mt5.py/0 | {
"file_path": "transformers/src/transformers/models/mt5/configuration_mt5.py",
"repo_id": "transformers",
"token_count": 3344
} | 517 |
# coding=utf-8
# Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text/audio processor class for MusicGen Melody
"""
from typing import Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
from ...utils.import_utils import requires
@requires(backends=("torchaudio",))
class MusicgenMelodyProcessor(ProcessorMixin):
r"""
Constructs a MusicGen Melody processor which wraps a Wav2Vec2 feature extractor - for raw audio waveform processing - and a T5 tokenizer into a single processor
class.
[`MusicgenProcessor`] offers all the functionalities of [`MusicgenMelodyFeatureExtractor`] and [`T5Tokenizer`]. See
[`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.
Args:
feature_extractor (`MusicgenMelodyFeatureExtractor`):
An instance of [`MusicgenMelodyFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`T5Tokenizer`):
An instance of [`T5Tokenizer`]. The tokenizer is a required input.
"""
feature_extractor_class = "MusicgenMelodyFeatureExtractor"
tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
# Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.get_decoder_prompt_ids
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
def __call__(self, audio=None, text=None, **kwargs):
"""
Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `audio`
and `kwargs` arguments to MusicgenMelodyFeatureExtractor's [`~MusicgenMelodyFeatureExtractor.__call__`] if `audio` is not
`None` to pre-process the audio. It also forwards the `text` and `kwargs` arguments to
PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the docstring of the above two methods for more information.
Args:
audio (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The audio or batch of audios to be prepared. Each audio can be a NumPy array or a PyTorch tensor. In the case
of a NumPy array/PyTorch tensor, each audio should be a mono-stereo signal of shape (T), where T is the sample length of the audio.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the
tokenizer.
Returns:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`.
- **attention_mask** -- List of token indices specifying which tokens should be attended to by the model when `text` is not `None`.
When only `audio` is specified, returns the timestamps attention mask.
"""
sampling_rate = kwargs.pop("sampling_rate", None)
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if text is not None:
inputs = self.tokenizer(text, **kwargs)
if audio is not None:
audio_inputs = self.feature_extractor(audio, sampling_rate=sampling_rate, **kwargs)
if text is None:
return audio_inputs
elif audio is None:
return inputs
else:
inputs["input_features"] = audio_inputs["input_features"]
return inputs
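# Usage sketch (illustrative; `audio_array` and the 32 kHz sampling rate are assumptions):
# >>> from transformers import MusicgenMelodyProcessor
# >>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody")
# >>> inputs = processor(
# ...     audio=audio_array,  # 1D numpy waveform
# ...     sampling_rate=32_000,
# ...     text=["80s pop track with synths"],
# ...     return_tensors="pt",
# ... )  # returns input_ids, attention_mask and input_features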
# Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.batch_decode with padding_mask->attention_mask
def batch_decode(self, *args, **kwargs):
"""
This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
[`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
"""
audio_values = kwargs.pop("audio", None)
attention_mask = kwargs.pop("attention_mask", None)
if len(args) > 0:
audio_values = args[0]
args = args[1:]
if audio_values is not None:
return self._decode_audio(audio_values, attention_mask=attention_mask)
else:
return self.tokenizer.batch_decode(*args, **kwargs)
# Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor._decode_audio with padding_mask->attention_mask
def _decode_audio(self, audio_values, attention_mask: Optional = None) -> list[np.ndarray]:
"""
This method strips any padding from the audio values to return a list of numpy audio arrays.
"""
audio_values = to_numpy(audio_values)
bsz, channels, seq_len = audio_values.shape
if attention_mask is None:
return list(audio_values)
attention_mask = to_numpy(attention_mask)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
difference = seq_len - attention_mask.shape[-1]
padding_value = 1 - self.feature_extractor.padding_value
attention_mask = np.pad(attention_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
audio_values = audio_values.tolist()
for i in range(bsz):
sliced_audio = np.asarray(audio_values[i])[
attention_mask[i][None, :] != self.feature_extractor.padding_value
]
audio_values[i] = sliced_audio.reshape(channels, -1)
return audio_values
def get_unconditional_inputs(self, num_samples=1, return_tensors="pt"):
"""
Helper function to get null inputs for unconditional generation, enabling the model to be used without the
feature extractor or tokenizer.
Args:
num_samples (int, *optional*):
Number of audio samples to unconditionally generate.
Example:
```python
>>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor
>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody")
>>> # get the unconditional (or 'null') inputs for the model
>>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody")
>>> unconditional_inputs = processor.get_unconditional_inputs(num_samples=1)
>>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
```"""
inputs = self.tokenizer([""] * num_samples, return_tensors=return_tensors, return_attention_mask=True)
inputs["attention_mask"][:] = 0
return inputs
__all__ = ["MusicgenMelodyProcessor"]
| transformers/src/transformers/models/musicgen_melody/processing_musicgen_melody.py/0 | {
"file_path": "transformers/src/transformers/models/musicgen_melody/processing_musicgen_melody.py",
"repo_id": "transformers",
"token_count": 3078
} | 518 |
# Copyright 2024 EleutherAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import shutil
from pathlib import Path
import torch
import yaml
from tokenizers import Tokenizer
from transformers import OlmoConfig, OlmoForCausalLM
from transformers.models.gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
"""
Sample usage:
```
python src/transformers/models/olmo/convert_olmo_weights_to_hf.py \
--input_dir /path/to/downloaded/olmo/weights --model_size 7B --output_dir /output/path
```
Thereafter, models can be loaded via:
```py
from transformers import OlmoForCausalLM, AutoTokenizer
model = OlmoForCausalLM.from_pretrained("/output/path")
tokenizer = AutoTokenizer.from_pretrained("/output/path")
```
Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
"""
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
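# Worked example (illustrative): with the defaults ffn_dim_multiplier=1 and multiple_of=256,
# compute_intermediate_size(4096) gives int(8 * 4096 / 3) = 10922, rounded up to the next
# multiple of 256, i.e. 11008.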
def read_json(path):
with open(path, "r") as f:
return json.load(f)
def write_json(text, path):
with open(path, "w") as f:
json.dump(text, f)
def write_model(model_path, input_base_path, tokenizer_path=None, safe_serialization=True, fix_eos_token_id=True):
os.makedirs(model_path, exist_ok=True)
tmp_model_path = os.path.join(model_path, "tmp")
os.makedirs(tmp_model_path, exist_ok=True)
config_path = Path(input_base_path) / "config.yaml"
olmo_config = yaml.safe_load(config_path.read_text())["model"]
n_layers = olmo_config["n_layers"]
n_heads = olmo_config["n_heads"]
dim = olmo_config["d_model"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
max_position_embeddings = olmo_config["max_sequence_length"]
vocab_size = olmo_config.get("embedding_size", olmo_config["vocab_size"])
if olmo_config.get("n_kv_heads", None) is not None:
num_key_value_heads = olmo_config["n_kv_heads"] # for GQA / MQA
elif olmo_config["multi_query_attention"]: # compatibility with other checkpoints
num_key_value_heads = 1
else:
num_key_value_heads = n_heads
print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
loaded = torch.load(os.path.join(input_base_path, "model.pt"), map_location="cpu", weights_only=True)
param_count = 0
index_dict = {"weight_map": {}}
for layer_i in range(n_layers):
filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
# Unsharded
# TODO: Layernorm stuff
# TODO: multi query attention
fused_dims = [dim, dims_per_head * num_key_value_heads, dims_per_head * num_key_value_heads]
q_proj_weight, k_proj_weight, v_proj_weight = torch.split(
loaded[f"transformer.blocks.{layer_i}.att_proj.weight"], fused_dims, dim=0
)
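# Shape sketch (illustrative, assuming the 7B configuration: d_model=4096, 32 heads, no GQA):
# fused_dims is [4096, 4096, 4096], so att_proj.weight of shape (12288, 4096) is split into
# equally sized query, key and value blocks along dim 0.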
up_proj_weight, gate_proj_weight = torch.chunk(
loaded[f"transformer.blocks.{layer_i}.ff_proj.weight"], 2, dim=0
)
state_dict = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": q_proj_weight,
f"model.layers.{layer_i}.self_attn.k_proj.weight": k_proj_weight,
f"model.layers.{layer_i}.self_attn.v_proj.weight": v_proj_weight,
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"transformer.blocks.{layer_i}.attn_out.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": gate_proj_weight,
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"transformer.blocks.{layer_i}.ff_out.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": up_proj_weight,
}
state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
# Unsharded
# TODO: Deal with weight-tying
state_dict = {
"model.embed_tokens.weight": loaded["transformer.wte.weight"],
"lm_head.weight": loaded["transformer.ff_out.weight"]
if "transformer.ff_out.weight" in loaded
else loaded["transformer.wte.weight"],
}
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
index_dict["metadata"] = {"total_size": param_count * 2}
write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
if olmo_config.get("mlp_hidden_size", None) is not None:
intermediate_size = olmo_config["mlp_hidden_size"] // 2
else:
intermediate_size = (dim * olmo_config["mlp_ratio"]) // 2
config = OlmoConfig(
vocab_size=vocab_size,
hidden_size=dim,
intermediate_size=intermediate_size,
num_hidden_layers=n_layers,
num_attention_heads=n_heads,
num_key_value_heads=num_key_value_heads,
max_position_embeddings=max_position_embeddings,
pad_token_id=olmo_config["pad_token_id"],
bos_token_id=None,
eos_token_id=olmo_config["eos_token_id"],
tie_word_embeddings=olmo_config["weight_tying"],
rope_theta=base,
clip_qkv=olmo_config.get("clip_qkv"),
)
config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
if tokenizer_path is not None:
_write_tokenizer(model_path, config, tokenizer_path, fix_eos_token_id)
print("Loading the checkpoint in a OLMo model.")
model = OlmoForCausalLM.from_pretrained(tmp_model_path, dtype=torch.float32)
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format.")
model.save_pretrained(model_path, safe_serialization=safe_serialization)
shutil.rmtree(tmp_model_path)
def _write_tokenizer(
output_path: Path, config: OlmoConfig, input_tokenizer_path: Path, fix_eos_token_id: bool = True
) -> None:
print(f"Saving a {GPTNeoXTokenizerFast.__name__} to {output_path}.")
base_tokenizer = Tokenizer.from_file(str(input_tokenizer_path))
eos_token_id = config.eos_token_id if config.eos_token_id is not None else base_tokenizer.get_vocab_size() - 1
pad_token_id = config.pad_token_id if config.pad_token_id is not None else eos_token_id
if fix_eos_token_id and eos_token_id == 0:
# Fixing a bug in OLMo where eos token id was incorrectly set
print("Changing eos_token_id from 0 to 50279.")
eos_token_id = 50279
tokenizer = GPTNeoXTokenizerFast(
tokenizer_object=base_tokenizer,
eos_token=base_tokenizer.decode([eos_token_id], skip_special_tokens=False),
pad_token=base_tokenizer.decode([pad_token_id], skip_special_tokens=False),
unk_token=None,
bos_token=None,
)
tokenizer.save_pretrained(output_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
required=True,
help="Location of OLMo weights, which contains config.yaml and model.pt.",
)
parser.add_argument(
"--tokenizer_json_path",
default=None,
help="Location of OLMo tokenizer json file.",
)
parser.add_argument(
"--output_dir",
required=True,
help="Location to write HF model and tokenizer",
)
parser.add_argument(
"--no_fix_eos_token_id",
action="store_false",
dest="fix_eos_token_id",
help="If set, does not change eos token id from 0 to 50279 if it is 0. Changing 0 to 50279 is a bug fix, so use this option with care.",
)
parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
# Different OLMo versions used different default values for max_position_embeddings, hence the need to be able to specify which version is being used.
args = parser.parse_args()
write_model(
model_path=args.output_dir,
input_base_path=args.input_dir,
safe_serialization=args.safe_serialization,
tokenizer_path=args.tokenizer_json_path,
fix_eos_token_id=args.fix_eos_token_id,
)
if __name__ == "__main__":
main()
| transformers/src/transformers/models/olmo/convert_olmo_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/olmo/convert_olmo_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 3863
} | 519 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for OmDet-Turbo.
"""
import warnings
from typing import TYPE_CHECKING, Optional, Union
from ...feature_extraction_utils import BatchFeature
from ...image_transforms import center_to_corners_format
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import (
TensorType,
is_torch_available,
is_torchvision_available,
)
from ...utils.import_utils import requires
if TYPE_CHECKING:
from .modeling_omdet_turbo import OmDetTurboObjectDetectionOutput
class OmDetTurboTextKwargs(TextKwargs, total=False):
task: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]
if is_torch_available():
import torch
if is_torchvision_available():
from torchvision.ops.boxes import batched_nms
class OmDetTurboProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: OmDetTurboTextKwargs
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": "max_length",
"truncation": True,
"max_length": 77,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_token_type_ids": False,
"return_length": False,
"verbose": True,
"task": None,
},
"images_kwargs": {},
}
class DictWithDeprecationWarning(dict):
message = (
"The `classes` key is deprecated for `OmDetTurboProcessor.post_process_grounded_object_detection` "
"output dict and will be removed in a 4.51.0 version. Please use `text_labels` instead."
)
def __getitem__(self, key):
if key == "classes":
warnings.warn(self.message, FutureWarning)
return super().__getitem__("text_labels")
return super().__getitem__(key)
def get(self, key, *args, **kwargs):
if key == "classes":
warnings.warn(self.message, FutureWarning)
return super().get("text_labels", *args, **kwargs)
return super().get(key, *args, **kwargs)
def clip_boxes(box, box_size: tuple[int, int]):
"""
Clip the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box (Tensor): The box to be clipped.
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(box).all(), "Box tensor contains infinite or NaN!"
height, width = box_size
x1 = box[:, 0].clamp(min=0, max=width)
y1 = box[:, 1].clamp(min=0, max=height)
x2 = box[:, 2].clamp(min=0, max=width)
y2 = box[:, 3].clamp(min=0, max=height)
box = torch.stack((x1, y1, x2, y2), dim=-1)
return box
def compute_score(boxes):
"""
Compute logit scores per class for each box (proposal) and an array of class indices
corresponding to each proposal, flattened across the proposal_num.
The indices in `classes` will later be used to filter and match the predicted classes
with the input class names.
"""
num_classes = boxes.shape[2]
proposal_num = boxes.shape[1]
scores = torch.sigmoid(boxes)
classes = torch.arange(num_classes, device=boxes.device).unsqueeze(0).repeat(proposal_num, 1).flatten(0, 1)
return scores, classes
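# Shape sketch (illustrative; num_queries=900 is an assumption): for class logits of shape
# (batch_size, 900, num_classes), `scores` keeps that shape after the sigmoid, while `classes`
# is the flattened pattern [0, 1, ..., num_classes - 1] repeated 900 times, later used to map
# top-k flattened scores back to class indices for a given image.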
def _post_process_boxes_for_image(
boxes: "torch.Tensor",
scores: "torch.Tensor",
labels: "torch.Tensor",
image_num_classes: int,
image_size: tuple[int, int],
threshold: float,
nms_threshold: float,
max_num_det: Optional[int] = None,
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
"""
Filter predicted results using given thresholds and NMS.
Args:
boxes (`torch.Tensor`):
A Tensor of predicted class-specific or class-agnostic boxes for the image.
Shape (num_queries, max_num_classes_in_batch * 4) if doing class-specific regression,
or (num_queries, 4) if doing class-agnostic regression.
scores (`torch.Tensor` of shape (num_queries, max_num_classes_in_batch + 1)):
A Tensor of predicted class scores for the image.
labels (`torch.Tensor` of shape (num_queries * (max_num_classes_in_batch + 1),)):
A Tensor of predicted labels for the image.
image_num_classes (`int`):
The number of classes queried for detection on the image.
image_size (`tuple[int, int]`):
A tuple of (height, width) for the image.
threshold (`float`):
Only return detections with a confidence score exceeding this threshold.
nms_threshold (`float`):
The threshold to use for box non-maximum suppression. Value in [0, 1].
max_num_det (`int`, *optional*):
The maximum number of detections to return. Default is None.
Returns:
Tuple: A tuple with the following:
"boxes" (Tensor): A tensor of shape (num_filtered_objects, 4), containing the predicted boxes in (x1, y1, x2, y2) format.
"scores" (Tensor): A tensor of shape (num_filtered_objects,), containing the predicted confidence scores for each detection.
"labels" (Tensor): A tensor of ids, where each id is the predicted class id for the corresponding detection
"""
# Filter by max number of detections
proposal_num = len(boxes) if max_num_det is None else max_num_det
scores_per_image, topk_indices = scores.flatten(0, 1).topk(proposal_num, sorted=False)
labels_per_image = labels[topk_indices]
boxes_per_image = boxes.view(-1, 1, 4).repeat(1, scores.shape[1], 1).view(-1, 4)
boxes_per_image = boxes_per_image[topk_indices]
# Convert and scale boxes to original image size
boxes_per_image = center_to_corners_format(boxes_per_image)
boxes_per_image = boxes_per_image * torch.tensor(image_size[::-1]).repeat(2).to(boxes_per_image.device)
# Filtering by confidence score
filter_mask = scores_per_image > threshold # R x K
score_keep = filter_mask.nonzero(as_tuple=False).view(-1)
boxes_per_image = boxes_per_image[score_keep]
scores_per_image = scores_per_image[score_keep]
labels_per_image = labels_per_image[score_keep]
# Ensure we did not overflow to non existing classes
filter_classes_mask = labels_per_image < image_num_classes
classes_keep = filter_classes_mask.nonzero(as_tuple=False).view(-1)
boxes_per_image = boxes_per_image[classes_keep]
scores_per_image = scores_per_image[classes_keep]
labels_per_image = labels_per_image[classes_keep]
# NMS
keep = batched_nms(boxes_per_image, scores_per_image, labels_per_image, nms_threshold)
boxes_per_image = boxes_per_image[keep]
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
# Clip to image size
boxes_per_image = clip_boxes(boxes_per_image, image_size)
return boxes_per_image, scores_per_image, labels_per_image
@requires(backends=("vision", "torchvision"))
class OmDetTurboProcessor(ProcessorMixin):
r"""
Constructs a OmDet-Turbo processor which wraps a Deformable DETR image processor and an AutoTokenizer into a
single processor.
[`OmDetTurboProcessor`] offers all the functionalities of [`DetrImageProcessor`] and
[`AutoTokenizer`]. See the docstring of [`~OmDetTurboProcessor.__call__`] and [`~OmDetTurboProcessor.decode`]
for more information.
Args:
image_processor (`DetrImageProcessor`):
An instance of [`DetrImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = ("DetrImageProcessor", "DetrImageProcessorFast")
tokenizer_class = "AutoTokenizer"
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
def __call__(
self,
images: ImageInput = None,
text: Optional[Union[list[str], list[list[str]]]] = None,
audio=None,
videos=None,
**kwargs: Unpack[OmDetTurboProcessorKwargs],
) -> BatchFeature:
"""
This method uses the [`DetrImageProcessor.__call__`] method to prepare image(s) for the model, and
[`CLIPTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255.
text (`Union[str, list[str], list[list[str]]]`):
The classes used to limit the scope of the open vocabulary detection. Expects a list of strings or a list
of list of strings. Batched classes can be of different lengths.
Examples: ["cat", "dog", "bird"], [["cat", "dog", "bird"], ["hat", "person"], ["car"]]
Kwargs:
task (`Union[str, list[str], TextInput, PreTokenizedInput]`):
The grounded text used to guide open vocabulary detection. Expects a single string or a list of strings.
Examples: "Detect a cat, a dog, and a bird.",[ "Detect everything.", "Detect trees and flowers."]
When not provided, the default task is "Detect [class1], [class2], [class3]" etc.
...
"""
if images is None or text is None:
raise ValueError("You have to specify both `images` and `text`")
output_kwargs = self._merge_kwargs(
OmDetTurboProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = text.strip(" ").split(",")
if not (len(text) and isinstance(text[0], (list, tuple))):
text = [text]
task = output_kwargs["text_kwargs"].pop("task", None)
if task is None:
task = ["Detect {}.".format(", ".join(text_single)) for text_single in text]
elif not isinstance(task, (list, tuple)):
task = [task]
encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"])
tasks_encoding = self.tokenizer(text=task, **output_kwargs["text_kwargs"])
classes = text
classes_structure = torch.tensor([len(class_single) for class_single in classes], dtype=torch.long)
classes_flattened = [class_single for class_batch in classes for class_single in class_batch]
classes_encoding = self.tokenizer(text=classes_flattened, **output_kwargs["text_kwargs"])
encoding = BatchFeature()
encoding.update({f"tasks_{key}": value for key, value in tasks_encoding.items()})
encoding.update({f"classes_{key}": value for key, value in classes_encoding.items()})
encoding.update({"classes_structure": classes_structure})
encoding.update(encoding_image_processor)
return encoding
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
tokenizer_input_names = [
"classes_attention_mask",
"tasks_attention_mask",
"tasks_input_ids",
"classes_input_ids",
"classes_structure",
]
return tokenizer_input_names + image_processor_input_names
def _get_default_image_size(self) -> tuple[int, int]:
height = (
self.image_processor.size["height"]
if "height" in self.image_processor.size
else self.image_processor.size["shortest_edge"]
)
width = (
self.image_processor.size["width"]
if "width" in self.image_processor.size
else self.image_processor.size["longest_edge"]
)
return height, width
def post_process_grounded_object_detection(
self,
outputs: "OmDetTurboObjectDetectionOutput",
text_labels: Optional[Union[list[str], list[list[str]]]] = None,
threshold: float = 0.3,
nms_threshold: float = 0.5,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
max_num_det: Optional[int] = None,
):
"""
Converts the raw output of [`OmDetTurboForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format and gets the associated text classes.
Args:
outputs ([`OmDetTurboObjectDetectionOutput`]):
Raw outputs of the model.
text_labels (Union[list[str], list[list[str]]], *optional*):
The input classes names. If not provided, `text_labels` will be set to `None` in `outputs`.
            threshold (`float`, *optional*, defaults to 0.3):
                Only return detections with a confidence score exceeding this threshold.
            nms_threshold (`float`, *optional*, defaults to 0.5):
                The threshold to use for box non-maximum suppression. Value in [0, 1].
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
max_num_det (`int`, *optional*):
The maximum number of detections to return.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, classes and boxes for an image
in the batch as predicted by the model.
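        Example (a hedged sketch only; it assumes `outputs` comes from a forward pass such as the one in the class
        docstring and that `image` is the original PIL image):

        ```python
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs,
        ...     text_labels=[["cat", "remote"]],
        ...     threshold=0.3,
        ...     nms_threshold=0.5,
        ...     target_sizes=[(image.height, image.width)],
        ... )
        >>> boxes, scores, text_labels = results[0]["boxes"], results[0]["scores"], results[0]["text_labels"]
        ```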
"""
batch_size = len(outputs.decoder_coord_logits)
# Inputs consistency check for target sizes
if target_sizes is None:
height, width = self._get_default_image_size()
target_sizes = [(height, width)] * batch_size
if any(len(image_size) != 2 for image_size in target_sizes):
raise ValueError(
"Each element of target_sizes must contain the size (height, width) of each image of the batch"
)
if len(target_sizes) != batch_size:
raise ValueError("Make sure that you pass in as many target sizes as output sequences")
# Inputs consistency check for text labels
if text_labels is not None and isinstance(text_labels[0], str):
text_labels = [text_labels]
if text_labels is not None and len(text_labels) != batch_size:
            raise ValueError("Make sure that you pass in as many class groups as output sequences")
# Convert target_sizes to list for easier handling
if isinstance(target_sizes, torch.Tensor):
target_sizes = target_sizes.tolist()
batch_boxes = outputs.decoder_coord_logits
batch_logits = outputs.decoder_class_logits
batch_num_classes = outputs.classes_structure
batch_scores, batch_labels = compute_score(batch_logits)
results = []
for boxes, scores, image_size, image_num_classes in zip(
batch_boxes, batch_scores, target_sizes, batch_num_classes
):
boxes, scores, labels = _post_process_boxes_for_image(
boxes=boxes,
scores=scores,
labels=batch_labels,
image_num_classes=image_num_classes,
image_size=image_size,
threshold=threshold,
nms_threshold=nms_threshold,
max_num_det=max_num_det,
)
result = DictWithDeprecationWarning(
{"boxes": boxes, "scores": scores, "labels": labels, "text_labels": None}
)
results.append(result)
# Add text labels
if text_labels is not None:
for result, image_text_labels in zip(results, text_labels):
result["text_labels"] = [image_text_labels[idx] for idx in result["labels"]]
return results
__all__ = ["OmDetTurboProcessor"]
| transformers/src/transformers/models/omdet_turbo/processing_omdet_turbo.py/0 | {
"file_path": "transformers/src/transformers/models/omdet_turbo/processing_omdet_turbo.py",
"repo_id": "transformers",
"token_count": 6843
} | 520 |
# coding=utf-8
# Copyright 2022 The Metaseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OPT model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class OPTConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of an [`OPTModel`]. It is used to instantiate an OPT model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the OPT
[facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50272):
Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`OPTModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer decoder.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
do_layer_norm_before (`bool`, *optional*, defaults to `True`):
Whether to perform layer normalization before the attention block.
word_embed_proj_dim (`int`, *optional*):
`word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to
`hidden_size`.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more
details.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
enable_bias (`bool`, *optional*, defaults to `True`):
            Whether or not the linear layers in the attention blocks should use the bias term.
layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether or not the layer norms should have learnable parameters.
Example:
```python
>>> from transformers import OPTConfig, OPTModel
    >>> # Initializing an OPT facebook/opt-large style configuration
>>> configuration = OPTConfig()
>>> # Initializing a model (with random weights) from the facebook/opt-large style configuration
>>> model = OPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "opt"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=50272,
hidden_size=768,
num_hidden_layers=12,
ffn_dim=3072,
max_position_embeddings=2048,
do_layer_norm_before=True,
_remove_final_layer_norm=False,
word_embed_proj_dim=None,
dropout=0.1,
attention_dropout=0.0,
num_attention_heads=12,
activation_function="relu",
layerdrop=0.0,
init_std=0.02,
use_cache=True,
pad_token_id=1,
bos_token_id=2,
eos_token_id=2,
enable_bias=True,
layer_norm_elementwise_affine=True,
**kwargs,
):
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.num_attention_heads = num_attention_heads
self.word_embed_proj_dim = word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size
self.ffn_dim = ffn_dim
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_function = activation_function
self.init_std = init_std
self.layerdrop = layerdrop
self.use_cache = use_cache
self.do_layer_norm_before = do_layer_norm_before
# We keep these variables at `True` for backward compatibility.
self.enable_bias = enable_bias
self.layer_norm_elementwise_affine = layer_norm_elementwise_affine
# Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility
# with checkpoints that have been fine-tuned before transformers v4.20.1
# see https://github.com/facebookresearch/metaseq/pull/164
self._remove_final_layer_norm = _remove_final_layer_norm
__all__ = ["OPTConfig"]
| transformers/src/transformers/models/opt/configuration_opt.py/0 | {
"file_path": "transformers/src/transformers/models/opt/configuration_opt.py",
"repo_id": "transformers",
"token_count": 2514
} | 521 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for OWLv2."""
import warnings
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_to_corners_format,
pad,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...utils import (
TensorType,
filter_out_non_signature_kwargs,
is_scipy_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
if is_torch_available():
import torch
if is_vision_available():
import PIL
if is_scipy_available():
from scipy import ndimage as ndi
if TYPE_CHECKING:
from .modeling_owlv2 import Owlv2ObjectDetectionOutput
logger = logging.get_logger(__name__)
def _scale_boxes(boxes, target_sizes):
"""
Scale batch of bounding boxes to the target sizes.
Args:
boxes (`torch.Tensor` of shape `(batch_size, num_boxes, 4)`):
Bounding boxes to scale. Each box is expected to be in (x1, y1, x2, y2) format.
target_sizes (`list[tuple[int, int]]` or `torch.Tensor` of shape `(batch_size, 2)`):
Target sizes to scale the boxes to. Each target size is expected to be in (height, width) format.
Returns:
`torch.Tensor` of shape `(batch_size, num_boxes, 4)`: Scaled bounding boxes.
"""
if isinstance(target_sizes, (list, tuple)):
image_height = torch.tensor([i[0] for i in target_sizes])
image_width = torch.tensor([i[1] for i in target_sizes])
elif isinstance(target_sizes, torch.Tensor):
image_height, image_width = target_sizes.unbind(1)
else:
raise TypeError("`target_sizes` must be a list, tuple or torch.Tensor")
    # For OWLv2 the image is padded to a square of the max side length (unlike OWL-ViT), which is why boxes have to be scaled by that max size
max_size = torch.max(image_height, image_width)
scale_factor = torch.stack([max_size, max_size, max_size, max_size], dim=1)
scale_factor = scale_factor.unsqueeze(1).to(boxes.device)
boxes = boxes * scale_factor
return boxes
# Copied from transformers.models.owlvit.image_processing_owlvit._upcast
def _upcast(t):
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
# Copied from transformers.models.owlvit.image_processing_owlvit.box_area
def box_area(boxes):
"""
Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.
Args:
boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
< x2` and `0 <= y1 < y2`.
Returns:
`torch.FloatTensor`: a tensor containing the area for each box.
"""
boxes = _upcast(boxes)
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# Copied from transformers.models.owlvit.image_processing_owlvit.box_iou
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2]
inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def _preprocess_resize_output_shape(image, output_shape):
"""Validate resize output shape according to input image.
Args:
image (`np.ndarray`):
Image to be resized.
output_shape (`iterable`):
Size of the generated output image `(rows, cols[, ...][, dim])`. If `dim` is not provided, the number of
channels is preserved.
    Returns:
        image (`np.ndarray`):
            The input image, but with additional singleton dimensions appended in the case where
            `len(output_shape) > input.ndim`.
        output_shape (`tuple`):
            The output shape converted to tuple.
    Raises:
        ValueError: If the length of `output_shape` is smaller than the number of dimensions of the image.
    Notes:
        The input image is reshaped if its number of dimensions is not equal to the length of `output_shape`.
"""
output_shape = tuple(output_shape)
output_ndim = len(output_shape)
input_shape = image.shape
if output_ndim > image.ndim:
# append dimensions to input_shape
input_shape += (1,) * (output_ndim - image.ndim)
image = np.reshape(image, input_shape)
elif output_ndim == image.ndim - 1:
# multichannel case: append shape of last axis
output_shape = output_shape + (image.shape[-1],)
elif output_ndim < image.ndim:
raise ValueError("output_shape length cannot be smaller than the image number of dimensions")
return image, output_shape
def _clip_warp_output(input_image, output_image):
"""Clip output image to range of values of input image.
Note that this function modifies the values of *output_image* in-place.
Taken from:
https://github.com/scikit-image/scikit-image/blob/b4b521d6f0a105aabeaa31699949f78453ca3511/skimage/transform/_warps.py#L640.
Args:
input_image : ndarray
Input image.
output_image : ndarray
Output image, which is modified in-place.
"""
min_val = np.min(input_image)
if np.isnan(min_val):
# NaNs detected, use NaN-safe min/max
min_func = np.nanmin
max_func = np.nanmax
min_val = min_func(input_image)
else:
min_func = np.min
max_func = np.max
max_val = max_func(input_image)
output_image = np.clip(output_image, min_val, max_val)
return output_image
class Owlv2ImageProcessor(BaseImageProcessor):
r"""
Constructs an OWLv2 image processor.
Args:
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to a square with gray pixels on the bottom and the right. Can be overridden by
`do_pad` in the `preprocess` method.
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 960, "width": 960}`):
Size to resize the image to. Can be overridden by `size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling method to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
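    Example (a minimal preprocessing sketch using the default settings described above; the dummy image is
    illustrative only):

    ```python
    >>> import numpy as np
    >>> from PIL import Image

    >>> from transformers import Owlv2ImageProcessor

    >>> image_processor = Owlv2ImageProcessor()
    >>> image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    >>> inputs = image_processor(images=image, return_tensors="pt")
    >>> list(inputs["pixel_values"].shape)  # padded to a square, then resized to the default 960x960
    [1, 3, 960, 960]
    ```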
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_pad: bool = True,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.do_resize = do_resize
self.size = size if size is not None else {"height": 960, "width": 960}
self.resample = resample
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
def pad(
self,
image: np.array,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Pad an image to a square with gray pixels on the bottom and the right, as per the original OWLv2
implementation.
Args:
image (`np.ndarray`):
Image to pad.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
"""
height, width = get_image_size(image)
size = max(height, width)
image = pad(
image=image,
padding=((0, size - height), (0, size - width)),
constant_values=0.5,
data_format=data_format,
input_data_format=input_data_format,
)
return image
def resize(
self,
image: np.ndarray,
size: dict[str, int],
anti_aliasing: bool = True,
anti_aliasing_sigma=None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image as per the original implementation.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary containing the height and width to resize the image to.
anti_aliasing (`bool`, *optional*, defaults to `True`):
Whether to apply anti-aliasing when downsampling the image.
anti_aliasing_sigma (`float`, *optional*, defaults to `None`):
Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated
automatically.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
"""
requires_backends(self, "scipy")
output_shape = (size["height"], size["width"])
image = to_channel_dimension_format(image, ChannelDimension.LAST)
image, output_shape = _preprocess_resize_output_shape(image, output_shape)
input_shape = image.shape
factors = np.divide(input_shape, output_shape)
# Translate modes used by np.pad to those used by scipy.ndimage
ndi_mode = "mirror"
cval = 0
order = 1
if anti_aliasing:
if anti_aliasing_sigma is None:
anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2)
else:
anti_aliasing_sigma = np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors)
if np.any(anti_aliasing_sigma < 0):
raise ValueError("Anti-aliasing standard deviation must be greater than or equal to zero")
elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)):
warnings.warn(
"Anti-aliasing standard deviation greater than zero but not down-sampling along all axes"
)
filtered = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, mode=ndi_mode)
else:
filtered = image
zoom_factors = [1 / f for f in factors]
out = ndi.zoom(filtered, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True)
image = _clip_warp_output(image, out)
image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST)
image = (
to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
)
return image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_pad: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to a square with gray pixels on the bottom and the right.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size to resize the image to.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_pad = do_pad if do_pad is not None else self.do_pad
do_resize = do_resize if do_resize is not None else self.do_resize
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size) # for BC
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
# Here, pad and resize methods are different from the rest of image processors
# as they don't have any resampling in resize()
# or pad size in pad() (the maximum of (height, width) is taken instead).
# hence, these arguments don't need to be passed in validate_preprocess_arguments.
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
size=size,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_pad:
images = [self.pad(image=image, input_data_format=input_data_format) for image in images]
if do_resize:
images = [
self.resize(
image=image,
size=size,
input_data_format=input_data_format,
)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_object_detection with OwlViT->Owlv2
def post_process_object_detection(
self,
outputs: "Owlv2ObjectDetectionOutput",
threshold: float = 0.1,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
"""
Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format.
Args:
outputs ([`Owlv2ObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.1):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
- "scores": The confidence scores for each predicted box on the image.
- "labels": Indexes of the classes predicted by the model on the image.
- "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
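        Example (a hedged end-to-end sketch; the checkpoint name `google/owlv2-base-patch16-ensemble` and the image
        URL are illustrative):

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image

        >>> from transformers import Owlv2Processor, Owlv2ForObjectDetection

        >>> processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> results = processor.image_processor.post_process_object_detection(
        ...     outputs, threshold=0.1, target_sizes=[(image.height, image.width)]
        ... )
        ```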
"""
batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes
batch_size = len(batch_logits)
if target_sizes is not None and len(target_sizes) != batch_size:
raise ValueError("Make sure that you pass in as many target sizes as images")
# batch_logits of shape (batch_size, num_queries, num_classes)
batch_class_logits = torch.max(batch_logits, dim=-1)
batch_scores = torch.sigmoid(batch_class_logits.values)
batch_labels = batch_class_logits.indices
# Convert to [x0, y0, x1, y1] format
batch_boxes = center_to_corners_format(batch_boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
batch_boxes = _scale_boxes(batch_boxes, target_sizes)
results = []
for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes):
keep = scores > threshold
scores = scores[keep]
labels = labels[keep]
boxes = boxes[keep]
results.append({"scores": scores, "labels": labels, "boxes": boxes})
return results
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_image_guided_detection
def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None):
"""
Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO
        API.
Args:
outputs ([`OwlViTImageGuidedObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.0):
Minimum confidence threshold to use to filter out predicted boxes.
nms_threshold (`float`, *optional*, defaults to 0.3):
IoU threshold for non-maximum suppression of overlapping boxes.
target_sizes (`torch.Tensor`, *optional*):
Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
None, predictions will not be unnormalized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model. All labels are set to None as
        `OwlViTForObjectDetection.image_guided_detection` performs one-shot object detection.
"""
logits, target_boxes = outputs.logits, outputs.target_pred_boxes
if target_sizes is not None and len(logits) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
if target_sizes is not None and target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
probs = torch.max(logits, dim=-1)
scores = torch.sigmoid(probs.values)
# Convert to [x0, y0, x1, y1] format
target_boxes = center_to_corners_format(target_boxes)
# Apply non-maximum suppression (NMS)
if nms_threshold < 1.0:
for idx in range(target_boxes.shape[0]):
for i in torch.argsort(-scores[idx]):
if not scores[idx][i]:
continue
ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0]
ious[i] = -1.0 # Mask self-IoU.
scores[idx][ious > nms_threshold] = 0.0
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
target_boxes = _scale_boxes(target_boxes, target_sizes)
# Compute box display alphas based on prediction scores
results = []
alphas = torch.zeros_like(scores)
for idx in range(target_boxes.shape[0]):
# Select scores for boxes matching the current query:
query_scores = scores[idx]
if not query_scores.nonzero().numel():
continue
# Apply threshold on scores before scaling
query_scores[query_scores < threshold] = 0.0
# Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1.
# All other boxes will either belong to a different query, or will not be shown.
max_score = torch.max(query_scores) + 1e-6
query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9)
query_alphas = torch.clip(query_alphas, 0.0, 1.0)
alphas[idx] = query_alphas
mask = alphas[idx] > 0
box_scores = alphas[idx][mask]
boxes = target_boxes[idx][mask]
results.append({"scores": box_scores, "labels": None, "boxes": boxes})
return results
__all__ = ["Owlv2ImageProcessor"]
| transformers/src/transformers/models/owlv2/image_processing_owlv2.py/0 | {
"file_path": "transformers/src/transformers/models/owlv2/image_processing_owlv2.py",
"repo_id": "transformers",
"token_count": 11865
} | 522 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert PaliGemma checkpoints from the original repository."""
import argparse
import collections
import torch
from numpy import load
from transformers import (
AutoTokenizer,
GemmaTokenizer,
GemmaTokenizerFast,
PaliGemmaConfig,
PaliGemmaForConditionalGeneration,
PaliGemmaProcessor,
SiglipImageProcessor,
)
from transformers.tokenization_utils_base import AddedToken
from transformers.utils import logging
device = "cuda" # "cpu"
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# TODO add sequence length variations here
PALIGEMMA_VARIANTS = ["2b-test", "3b-224px", "3b-448px", "3b-896px"]
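# Example invocation (a sketch only; all local paths below are placeholders, not real files):
#   python convert_paligemma_weights_to_hf.py \
#       --checkpoint_path /path/to/paligemma.npz \
#       --tokenizer_model_file /path/to/tokenizer.model \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --variant 3b-224px \
#       --precision float32 \
#       --do_convert_weights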
def get_paligemma_config(variant: str, precision: str):
config = {
"image_token_id": None,
"pad_token_id": 0,
"bos_token_id": 2,
"eos_token_id": 1,
}
image_sizes = {"2b-test": 224, "3b-224px": 224, "3b-448px": 448, "3b-896px": 896}
if variant in PALIGEMMA_VARIANTS:
image_size = image_sizes[variant]
patch_size = 14
num_image_tokens = (image_size**2) // (patch_size**2)
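        # e.g. the 3b-448px variant gives (448 ** 2) // (14 ** 2) = 32 ** 2 = 1024 image tokens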
config["image_token_id"] = 257152 if variant != "2b-test" else 256000
text_config = {
"vocab_size": 257152,
"num_hidden_layers": 18,
"num_key_value_heads": 1,
"head_dim": 256,
"dtype": precision,
"hidden_size": 2048,
"hidden_activation": "gelu_pytorch_tanh",
"num_attention_heads": 8,
"intermediate_size": 16384,
"is_encoder_decoder": False,
}
vision_config = {
"dtype": precision,
"image_size": image_size,
"patch_size": patch_size,
"num_image_tokens": num_image_tokens,
"hidden_size": 1152,
"intermediate_size": 4304,
"num_hidden_layers": 27,
"num_attention_heads": 16,
"projector_hidden_act": "gelu_fast",
"vision_use_head": False,
}
final_config = PaliGemmaConfig(text_config=text_config, vision_config=vision_config, **config)
else:
raise ValueError(f"Identifier {variant} not supported. Available: {PALIGEMMA_VARIANTS}")
return final_config
def slice_state_dict(state_dict, config):
# fmt: off
# patch embeddings
state_dict["vision_tower.vision_model.embeddings.patch_embedding.weight"] = state_dict.pop("img/embedding/kernel").transpose(
3, 2, 0, 1
)
state_dict["vision_tower.vision_model.embeddings.patch_embedding.bias"] = state_dict.pop("img/embedding/bias")
# positional embeddings
state_dict["vision_tower.vision_model.embeddings.position_embedding.weight"] = state_dict.pop("img/pos_embedding").reshape(
-1, config.vision_config.hidden_size
)
# extract vision layers to be sliced at index 0. There are 27 layers in the base model.
encoderblock_layernorm0_scale = state_dict.pop("img/Transformer/encoderblock/LayerNorm_0/scale")
encoderblock_layernorm0_bias = state_dict.pop("img/Transformer/encoderblock/LayerNorm_0/bias")
encoderblock_layernorm1_scale = state_dict.pop("img/Transformer/encoderblock/LayerNorm_1/scale")
encoderblock_layernorm1_bias = state_dict.pop("img/Transformer/encoderblock/LayerNorm_1/bias")
encoderblock_mlp_dense0_kernel= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_0/kernel")
encoderblock_mlp_dense0_bias= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_0/bias")
encoderblock_mlp_dense1_kernel= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_1/kernel")
encoderblock_mlp_dense1_bias= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_1/bias")
encoderblock_attention_0_key_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/kernel")
encoderblock_attention_0_key_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/bias")
encoderblock_attention_0_value_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/kernel")
encoderblock_attention_0_value_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/bias")
encoderblock_attention_0_query_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/kernel")
encoderblock_attention_0_query_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/bias")
encoderblock_attention_0_out_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/kernel")
encoderblock_attention_0_out_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/bias")
for i in range(config.vision_config.num_hidden_layers):
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm1.weight"] = encoderblock_layernorm0_scale[i].transpose()
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm1.bias"] = encoderblock_layernorm0_bias[i]
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm2.weight"] = encoderblock_layernorm1_scale[i].transpose()
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm2.bias"] = encoderblock_layernorm1_bias[i]
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.weight"] = encoderblock_mlp_dense0_kernel[i].transpose()
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.bias"] = encoderblock_mlp_dense0_bias[i]
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.weight"] = encoderblock_mlp_dense1_kernel[i].transpose()
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.bias"] = encoderblock_mlp_dense1_bias[i]
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = encoderblock_attention_0_key_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose()
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = encoderblock_attention_0_key_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1)
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = encoderblock_attention_0_value_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose()
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = encoderblock_attention_0_value_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1)
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = encoderblock_attention_0_query_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose()
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = encoderblock_attention_0_query_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1)
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"] = encoderblock_attention_0_out_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose()
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.bias"] = encoderblock_attention_0_out_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1)
state_dict["vision_tower.vision_model.post_layernorm.weight"] = state_dict.pop("img/Transformer/encoder_norm/scale").transpose()
state_dict["vision_tower.vision_model.post_layernorm.bias"] = state_dict.pop("img/Transformer/encoder_norm/bias")
# multimodal projector
state_dict['multi_modal_projector.linear.weight'] = state_dict.pop("img/head/kernel").transpose()
state_dict['multi_modal_projector.linear.bias'] = state_dict.pop("img/head/bias")
# text decoder (gemma)
embedding_vector = state_dict.pop("llm/embedder/input_embedding")
state_dict["language_model.model.embed_tokens.weight"] = embedding_vector
# pop the einsum attention + mlp representations. There are 18 layers in gemma-2b.
llm_attention_attn_vec_einsum = state_dict.pop("llm/layers/attn/attn_vec_einsum/w")
llm_attention_kv_einsum = state_dict.pop("llm/layers/attn/kv_einsum/w")
llm_attention_q_einsum = state_dict.pop("llm/layers/attn/q_einsum/w")
llm_mlp_gating_einsum = state_dict.pop("llm/layers/mlp/gating_einsum")
llm_mlp_linear = state_dict.pop("llm/layers/mlp/linear")
# TODO verify correctness of layer norm loading
llm_input_layernorm = state_dict.pop("llm/layers/pre_attention_norm/scale")
llm_post_attention_layernorm = state_dict.pop("llm/layers/pre_ffw_norm/scale")
for i in range(config.text_config.num_hidden_layers):
# llm_attention_q_einsum[i].shape = (8, 2048, 256)
q_proj_weight_reshaped = llm_attention_q_einsum[i].transpose(0, 2, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size)
state_dict[f"language_model.model.layers.{i}.self_attn.q_proj.weight"] = q_proj_weight_reshaped
# llm_attention_kv_einsum[i, 0, 0].shape = (2048, 256)
k_proj_weight_reshaped = llm_attention_kv_einsum[i, 0, 0].transpose()
state_dict[f"language_model.model.layers.{i}.self_attn.k_proj.weight"] = k_proj_weight_reshaped
# llm_attention_kv_einsum[i, 1, 0].shape = (2048, 256)
v_proj_weight_reshaped = llm_attention_kv_einsum[i, 1, 0].transpose()
state_dict[f"language_model.model.layers.{i}.self_attn.v_proj.weight"] = v_proj_weight_reshaped
# output projection.
# llm_attention_attn_vec_einsum[i].shape = (8, 256, 2048)
o_proj_weight_reshaped = llm_attention_attn_vec_einsum[i].transpose(2, 0, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size)
state_dict[f"language_model.model.layers.{i}.self_attn.o_proj.weight"] = o_proj_weight_reshaped
# mlp layers
gate_proj_weight = llm_mlp_gating_einsum[i, 0]
state_dict[f"language_model.model.layers.{i}.mlp.gate_proj.weight"] = gate_proj_weight.transpose()
up_proj_weight = llm_mlp_gating_einsum[i, 1]
state_dict[f"language_model.model.layers.{i}.mlp.up_proj.weight"] = up_proj_weight.transpose()
state_dict[f"language_model.model.layers.{i}.mlp.down_proj.weight"] = llm_mlp_linear[i].transpose()
state_dict[f"language_model.model.layers.{i}.input_layernorm.weight"] = llm_input_layernorm[i]
state_dict[f"language_model.model.layers.{i}.post_attention_layernorm.weight"] = llm_post_attention_layernorm[i]
state_dict["language_model.model.norm.weight"] = state_dict.pop("llm/final_norm/scale")
state_dict["language_model.lm_head.weight"] = embedding_vector # weights are tied.
# fmt: on
for key, value in state_dict.items():
state_dict[key] = torch.from_numpy(value)
return state_dict
def flatten_nested_dict(params, parent_key="", sep="/"):
items = []
for k, v in params.items():
k = k.removeprefix("params/")
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_nested_dict(v, parent_key=new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
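# For illustration (not part of the conversion flow):
# flatten_nested_dict({"img": {"head": {"bias": 0}}}) == {"img/head/bias": 0}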
@torch.no_grad()
def convert_paligemma_checkpoint(
checkpoint_path,
tokenizer_model_file,
pytorch_dump_folder_path,
variant: str,
precision: str,
do_convert_weights=False,
):
"""
Read checkpoints from flax npz files, rename/reshape, send result to state dict and verify logits if needed.
"""
config = get_paligemma_config(variant, precision=precision)
if do_convert_weights:
if variant == "2b-test":
# for the test model, the vocabulary was smaller
tokenizer_id = "google/gemma-2b"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
else:
tokenizer_class = GemmaTokenizer if GemmaTokenizerFast is None else GemmaTokenizerFast
tokenizer = tokenizer_class(tokenizer_model_file)
image_token = AddedToken("<image>", normalized=False, special=True)
tokens_to_add = {"additional_special_tokens": [image_token]}
tokenizer.add_special_tokens(tokens_to_add)
# tokenizer.padding_side = 'right' # uncomment for testing purposes only.
image_processor = SiglipImageProcessor.from_pretrained("google/siglip-so400m-patch14-384")
image_processor.size = {"width": config.vision_config.image_size, "height": config.vision_config.image_size}
image_processor.image_seq_length = config.vision_config.num_image_tokens
processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer)
data = load(checkpoint_path)
state_dict = flatten_nested_dict(data)
del data
state_dict_transformers = slice_state_dict(state_dict, config)
del state_dict
model = PaliGemmaForConditionalGeneration(config).to(device).eval()
model.load_state_dict(state_dict_transformers)
del state_dict_transformers
else:
processor = PaliGemmaProcessor.from_pretrained(pytorch_dump_folder_path)
model = (
PaliGemmaForConditionalGeneration.from_pretrained(pytorch_dump_folder_path, attn_implementation="sdpa")
.to(device)
.eval()
)
model.config.text_config._attn_implementation = "sdpa"
# model expansion to get random embeds of image tokens
pad_shape = 64 # for performance reasons
pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
mu = torch.mean(pre_expansion_embeddings, dim=0).float()
n = pre_expansion_embeddings.size()[0]
sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
# We add an image token so we resize the model
model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape)
model.language_model.model.embed_tokens.weight.data[257152:] = torch.stack(
tuple(dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[257152:].shape[0])),
dim=0,
)
model.language_model.lm_head.weight.data[257152:] = torch.stack(
tuple(dist.sample() for _ in range(model.language_model.lm_head.weight.data[257152:].shape[0])),
dim=0,
)
model.save_pretrained(pytorch_dump_folder_path, max_shard_size="2GB", safe_serialization=True)
processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
required=True,
type=str,
help="Path to the .npz checkpoint",
)
parser.add_argument(
"--tokenizer_model_file",
required=True,
type=str,
help="Path to the sentencepiece tokenizer.model file",
)
parser.add_argument(
"--pytorch_dump_folder_path",
required=True,
type=str,
help="Path to the output directory where model and processor will be saved.",
)
parser.add_argument(
"--precision",
choices=["float32", "bfloat16", "float16"],
type=str,
help="Precision identifier for model conversion - should match the base checkpoint precision.",
)
parser.add_argument(
"--variant",
default="2b-test",
choices=PALIGEMMA_VARIANTS,
type=str,
help="String identifier of the paligemma variant to convert.",
)
parser.add_argument(
"--do_convert_weights", action="store_true", help="Whether or not to reload and convert the weights."
)
args = parser.parse_args()
convert_paligemma_checkpoint(
checkpoint_path=args.checkpoint_path,
tokenizer_model_file=args.tokenizer_model_file,
pytorch_dump_folder_path=args.pytorch_dump_folder_path,
variant=args.variant,
precision=args.precision,
do_convert_weights=args.do_convert_weights,
)
| transformers/src/transformers/models/paligemma/convert_paligemma_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/paligemma/convert_paligemma_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 7049
} | 523 |
# coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for model PEGASUS."""
import os
from shutil import copyfile
from typing import Optional
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
The token used for masking single token values. This is the token used when training this model with masked
language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
Summarization](https://huggingface.co/papers/1912.08777).
mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
The token used for masking whole target sentences. This is the token used when training this model with gap
sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
Abstractive Summarization](https://huggingface.co/papers/1912.08777).
additional_special_tokens (`List[str]`, *optional*):
            Additional special tokens used by the tokenizer. If no additional_special_tokens are provided, `<mask_2>`
            and `<unk_2>, ..., <unk_102>` are used as additional special tokens, corresponding to the [original PEGASUS
            tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
            that uses the tokens 2 - 104 only for pretraining.
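    Example (a minimal sketch; the checkpoint name `google/pegasus-xsum` is illustrative):

    ```python
    >>> from transformers import PegasusTokenizerFast

    >>> tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
    >>> inputs = tokenizer("PEGASUS is a sequence-to-sequence model.", return_tensors="pt")
    >>> sorted(inputs.keys())
    ['attention_mask', 'input_ids']
    ```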
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = PegasusTokenizer
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
pad_token="<pad>",
eos_token="</s>",
unk_token="<unk>",
mask_token="<mask_2>",
mask_token_sent="<mask_1>",
additional_special_tokens=None,
offset=103, # entries 2 - 104 are only used for pretraining
**kwargs,
):
self.offset = offset
if additional_special_tokens is not None:
if not isinstance(additional_special_tokens, list):
raise TypeError(
f"additional_special_tokens should be of type {type(list)}, but is"
f" {type(additional_special_tokens)}"
)
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
]
if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
)
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        # PEGASUS was designed to support changing the index of the first tokens. If any of the padding/eos/unk/mask
        # tokens is different from the default, we must rebuild the vocab.
from_slow = kwargs.pop("from_slow", None)
from_slow = from_slow or str(pad_token) != "<pad>" or str(eos_token) != "</s>" or str(unk_token) != "<unk>"
kwargs.pop("added_tokens_decoder", {})
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
pad_token=pad_token,
eos_token=eos_token,
unk_token=unk_token,
mask_token=mask_token,
mask_token_sent=mask_token_sent,
offset=offset,
additional_special_tokens=additional_special_tokens,
from_slow=from_slow,
**kwargs,
)
self.vocab_file = vocab_file
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
)
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(
self, token_ids_0: list, token_ids_1: Optional[list] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""Get list where entries are [1] if a token is [eos] or [pad] else 0."""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0)
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
"""
        Build model inputs from a sequence by adding eos to the end. No bos token is added to the front.
- single sequence: `X </s>`
- pair of sequences: `A B </s>` (not intended use)
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
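        Example (illustrative token IDs only; it assumes `eos_token_id` is 1, as in the standard PEGASUS vocabulary):

        ```python
        >>> tokenizer.build_inputs_with_special_tokens([5, 6, 7])
        [5, 6, 7, 1]
        ```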
"""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_0 + token_ids_1 + [self.eos_token_id]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
__all__ = ["PegasusTokenizerFast"]
| transformers/src/transformers/models/pegasus/tokenization_pegasus_fast.py/0 | {
"file_path": "transformers/src/transformers/models/pegasus/tokenization_pegasus_fast.py",
"repo_id": "transformers",
"token_count": 4059
} | 524 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/perception_lm/modular_perception_lm.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_perception_lm.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, can_return_tuple
from ..auto import AutoModel
from .configuration_perception_lm import PerceptionLMConfig
class PerceptionLMAdaptiveAvgPooling(nn.Module):
def __init__(self, pooling_ratio=2):
super().__init__()
self.pooling_ratio = pooling_ratio
def forward(self, hidden_states):
b, num_tokens, c = hidden_states.shape
h = int(math.sqrt(num_tokens))
if h * h != num_tokens:
raise ValueError(f"num_tokens {num_tokens} is expected to be a square number")
shape = (h // self.pooling_ratio, h // self.pooling_ratio)
hidden_states = hidden_states.permute(0, 2, 1).reshape(b, -1, h, h)
hidden_states = F.adaptive_avg_pool2d(hidden_states, shape)
hidden_states = hidden_states.flatten(2).transpose(1, 2)
return hidden_states
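# Illustrative sketch (editorial addition; sizes are assumptions, not from the original file): with
# pooling_ratio=2, a square grid of 576 vision tokens (24 x 24) is average-pooled down to 144 tokens
# (12 x 12) while the channel dimension is left untouched:
#     pool = PerceptionLMAdaptiveAvgPooling(pooling_ratio=2)
#     x = torch.randn(1, 576, 1024)          # (batch, num_tokens, channels)
#     pool(x).shape                          # -> torch.Size([1, 144, 1024])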
class PerceptionLMMultiModalProjector(nn.Module):
def __init__(self, config: PerceptionLMConfig):
super().__init__()
input_size = config.vision_config.model_args["embed_dim"]
output_size = config.text_config.hidden_size
self.linear_1 = nn.Linear(
in_features=input_size,
out_features=output_size,
bias=True,
)
self.gelu = nn.GELU()
self.linear_2 = nn.Linear(
in_features=output_size,
out_features=output_size,
bias=True,
)
self.pooling = (
PerceptionLMAdaptiveAvgPooling(config.projector_pooling_ratio)
if config.projector_pooling_ratio > 1
else nn.Identity()
)
def forward(self, features):
features = features.permute(1, 0, 2) # NLD -> LND
features = self.linear_1(features)
features = self.gelu(features)
features = self.linear_2(features)
features = features.permute(1, 0, 2) # LND -> NLD
features = self.pooling(features)
return features
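# Shape sketch (editorial addition; the concrete sizes are assumptions): with a vision embed_dim of 1024,
# a text hidden_size of 4096 and projector_pooling_ratio=2, tile features of shape (8, 576, 1024) are
# projected to (8, 576, 4096) by the two linear layers and then pooled to (8, 144, 4096).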
@auto_docstring
class PerceptionLMPreTrainedModel(PreTrainedModel):
config: PerceptionLMConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_flex_attn = True
_supports_attention_backend = True
@dataclass
@auto_docstring(
custom_intro="""
Base class for PerceptionLM outputs, with hidden states and attentions.
"""
)
class PerceptionLMModelOutputWithPast(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
Image hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
video_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_videos, sequence_length, hidden_size)`.
Video hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
video_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for PerceptionLM causal language model (or autoregressive) outputs.
"""
)
class PerceptionLMCausalLMOutputWithPast(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
Image hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
video_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_videos, sequence_length, hidden_size)`.
Video hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
video_hidden_states: Optional[torch.FloatTensor] = None
@auto_docstring
class PerceptionLMModel(PerceptionLMPreTrainedModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: PerceptionLMConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = PerceptionLMMultiModalProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def set_decoder(self, decoder):
self.language_model = decoder
def get_decoder(self):
return self.language_model
def get_image_features(
self,
pixel_values: torch.FloatTensor,
**kwargs,
):
"""
Obtains image last hidden states from the vision tower and applies the multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_tiles, channels, height, width)`):
The tensors corresponding to the input images.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_tiles, num_patches, embed_dim)`.
"""
image_outputs = self.vision_tower(pixel_values.flatten(0, 1))
image_outputs = image_outputs.last_hidden_state
if self.config.vision_use_cls_token:
image_outputs = image_outputs[:, 1:, :]
image_features = self.multi_modal_projector(image_outputs)
return image_features
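# Shape sketch (editorial addition; sizes are assumptions): pixel_values of shape (2, 4, 3, 448, 448),
# i.e. 2 samples with 4 tiles each, are flattened to (8, 3, 448, 448) before the vision tower, so the
# returned image_features carry a leading dimension of num_samples * num_tiles = 8.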
def get_placeholder_mask(
self,
input_ids: torch.LongTensor,
inputs_embeds: torch.FloatTensor,
image_features: torch.FloatTensor = None,
video_features: torch.FloatTensor = None,
):
"""
Obtains the multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
special_video_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_video_mask = special_video_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_video_mask = input_ids == self.config.video_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.size()[:-1].numel()}"
)
n_video_tokens = special_video_mask.sum()
special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
raise ValueError(
f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.size()[:-1].numel()}"
)
return special_image_mask, special_video_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**lm_kwargs,
) -> Union[tuple, PerceptionLMModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if (pixel_values is not None or pixel_values_videos is not None) and inputs_embeds is not None:
raise ValueError(
"You cannot specify both (pixel_values or pixel_values_videos) and inputs_embeds at the same time, and must specify either one"
)
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
image_features = None
if pixel_values is not None:
image_features = self.get_image_features(pixel_values=pixel_values)
image_features = image_features.to(inputs_embeds.device, dtype=inputs_embeds.dtype)
special_image_mask, _ = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
video_features = None
if pixel_values_videos is not None:
video_features = self.get_image_features(pixel_values=pixel_values_videos)
video_features = video_features.to(inputs_embeds.device, dtype=inputs_embeds.dtype)
_, special_video_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, video_features=video_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_video_mask, video_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**lm_kwargs,
)
return PerceptionLMModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
past_key_values=outputs.past_key_values,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
video_hidden_states=(video_features if pixel_values_videos is not None else None),
)
@auto_docstring
class PerceptionLMForConditionalGeneration(PerceptionLMPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {}
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config: PerceptionLMConfig):
super().__init__(config)
self.model = PerceptionLMModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**lm_kwargs,
) -> Union[tuple, PerceptionLMCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, PerceptionLMForConditionalGeneration
>>> model = PerceptionLMForConditionalGeneration.from_pretrained("perception_lm-hf/perception_lm-1.5-7b-hf")
>>> processor = AutoProcessor.from_pretrained("perception_lm-hf/perception_lm-1.5-7b-hf")
>>> prompt = "USER: <image>\nWhat's the content of the image? ASSISTANT:"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_new_tokens=15)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER: \nWhat's the content of the image? ASSISTANT: The image features a busy city street with a stop sign prominently displayed"
```"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**lm_kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits,
labels=labels,
vocab_size=self.config.text_config.vocab_size,
**lm_kwargs,
)
return PerceptionLMCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
video_hidden_states=outputs.video_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
pixel_values_videos=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
if cache_position[0] == 0:
# Pixel values are only forwarded on the prefill step (cache_position[0] == 0). During cached decoding,
# the input ids no longer contain the special image tokens, so the vision inputs are not needed again.
model_inputs["pixel_values"] = pixel_values
model_inputs["pixel_values_videos"] = pixel_values_videos
return model_inputs
__all__ = ["PerceptionLMForConditionalGeneration", "PerceptionLMPreTrainedModel", "PerceptionLMModel"]
| transformers/src/transformers/models/perception_lm/modeling_perception_lm.py/0 | {
"file_path": "transformers/src/transformers/models/perception_lm/modeling_perception_lm.py",
"repo_id": "transformers",
"token_count": 8727
} | 525 |
# coding=utf-8
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Phi-3 model."""
from typing import Callable, Optional
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import logging
from ...utils.deprecation import deprecate_kwarg
from ..mistral.modeling_mistral import (
MistralDecoderLayer,
MistralForCausalLM,
MistralForSequenceClassification,
MistralForTokenClassification,
MistralPreTrainedModel,
eager_attention_forward,
rotate_half,
)
from .configuration_phi3 import Phi3Config
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
_CONFIG_FOR_DOC = "Phi3Config"
class Phi3MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
self.activation_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
up_states = self.gate_up_proj(hidden_states)
gate, up_states = up_states.chunk(2, dim=-1)
up_states = up_states * self.activation_fn(gate)
return self.down_proj(up_states)
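# Editorial note: `gate_up_proj` fuses the usual gate and up projections into a single matmul;
# `chunk(2, dim=-1)` recovers the two halves, so the block computes down_proj(up * act_fn(gate))
# exactly like a standard gated MLP, with one fewer GEMM per forward pass.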
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
rotary_dim = cos.shape[-1]
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1)
return q_embed, k_embed
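# Illustrative sketch (editorial addition; shapes are assumptions): with partial rotary embeddings only the
# first `rotary_dim` channels are rotated and the remaining channels are passed through unchanged:
#     q = torch.randn(1, 32, 16, 96); k = torch.randn(1, 8, 16, 96)      # (batch, heads, seq, head_dim)
#     cos = torch.randn(1, 16, 64); sin = torch.randn(1, 16, 64)         # rotary_dim = 64 < head_dim
#     q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)            # output shapes match q and k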
class Phi3Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.num_key_value_heads = config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
qkv = self.qkv_proj(hidden_states)
query_pos = self.config.num_attention_heads * self.head_dim
query_states = qkv[..., :query_pos]
key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=getattr(self.config, "sliding_window", None),
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
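# Editorial note: unlike Llama-style attention with separate q/k/v projections, Phi-3 fuses them into a
# single `qkv_proj` matmul and slices the result; grouped-query attention (fewer key/value heads than
# query heads) is then handled inside the selected attention implementation.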
class Phi3DecoderLayer(MistralDecoderLayer):
def __init__(self, config: Phi3Config, layer_idx: int):
super().__init__(config, layer_idx)
self.config = config
self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
self.mlp = Phi3MLP(config)
self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + self.resid_attn_dropout(hidden_states) # main diff with Llama
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.resid_mlp_dropout(hidden_states) # main diff with Llama
return hidden_states
class Phi3PreTrainedModel(MistralPreTrainedModel):
_version = "0.0.5"
class Phi3ForCausalLM(MistralForCausalLM, Phi3PreTrainedModel):
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
# process
# The first time the input length crosses the long/short rope-factor switching point, force the cache to be recomputed.
# This slows down that single token position, but it is better than the failure that would otherwise occur.
if (
past_key_values
and self.config.rope_scaling
and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
):
past_length = cache_position[0]
if past_length <= self.config.original_max_position_embeddings:
past_key_values = None
model_inputs = Phi3PreTrainedModel().prepare_inputs_for_generation(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
position_ids=position_ids,
use_cache=use_cache,
logits_to_keep=logits_to_keep,
**kwargs,
)
return model_inputs
class Phi3ForSequenceClassification(MistralForSequenceClassification):
pass
class Phi3ForTokenClassification(MistralForTokenClassification):
pass
__all__ = [
"Phi3PreTrainedModel",
"Phi3Model", # noqa: F822
"Phi3ForCausalLM",
"Phi3ForSequenceClassification",
"Phi3ForTokenClassification",
]
| transformers/src/transformers/models/phi3/modular_phi3.py/0 | {
"file_path": "transformers/src/transformers/models/phi3/modular_phi3.py",
"repo_id": "transformers",
"token_count": 4620
} | 526 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
Pix2StructConfig,
Pix2StructForConditionalGeneration,
Pix2StructImageProcessor,
Pix2StructProcessor,
Pix2StructTextConfig,
Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
flax_params = flatten_dict(flax_params)
return flax_params
def rename_and_convert_flax_params(flax_dict):
converted_dict = {}
CONVERSION_MAPPING = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
DECODER_CONVERSION_MAPPING = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict:
if "target" in key:
# remove the first prefix from the key
new_key = ".".join(key[1:])
# rename the key
for old, new in CONVERSION_MAPPING.items():
new_key = new_key.replace(old, new)
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
new_key = new_key.replace(old, new)
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
new_key = new_key.replace("encoder", "encoder.encoder")
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
converted_dict[new_key] = flax_dict[key]
converted_torch_dict = {}
# convert converted_dict into torch format
for key, value in converted_dict.items():
if ("embed_tokens" not in key) and ("embedder" not in key):
converted_torch_dict[key] = torch.from_numpy(value.T)
else:
converted_torch_dict[key] = torch.from_numpy(value)
return converted_torch_dict
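# Illustrative sketch (editorial addition; the key is hypothetical, not taken from a real checkpoint): a flax
# entry such as ("target", "encoder", "layers_0", "attention", "query", "kernel") is joined to
# "encoder.layers_0.attention.query.kernel", renamed via CONVERSION_MAPPING to
# "encoder.layers_0.attention.query.weight", rewritten to "encoder.encoder.layer.0.attention.query.weight"
# by the layer-number regex and the encoder prefix replacement, and its value is transposed when converted
# to a torch tensor (embedding weights are the only ones kept untransposed).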
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
flax_params = get_flax_param(t5x_checkpoint_path)
if not use_large:
encoder_config = Pix2StructVisionConfig()
decoder_config = Pix2StructTextConfig()
else:
encoder_config = Pix2StructVisionConfig(
hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
)
decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
config = Pix2StructConfig(
vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
)
model = Pix2StructForConditionalGeneration(config)
torch_params = rename_and_convert_flax_params(flax_params)
model.load_state_dict(torch_params)
tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
image_processor = Pix2StructImageProcessor()
processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)
if use_large:
processor.image_processor.max_patches = 4096
processor.image_processor.is_vqa = True
# mkdir if needed
os.makedirs(pytorch_dump_folder_path, exist_ok=True)
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
print(f"Model saved in {pytorch_dump_folder_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
args = parser.parse_args()
convert_pix2struct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
)
| transformers/src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py",
"repo_id": "transformers",
"token_count": 2427
} | 527 |
# coding=utf-8
# Copyright 2022, UCLA NLP, The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import copyfile
from typing import Any, Optional
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
FAIRSEQ_LANGUAGE_CODES = {
"base": ["__java__", "__python__", "__en_XX__"],
"multi": ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
}
FAIRSEQ_LANGUAGE_CODES_MAP = {
"java": "__java__",
"python": "__python__",
"en_XX": "__en_XX__",
"javascript": "__javascript__",
"php": "__php__",
"ruby": "__ruby__",
"go": "__go__",
}
@requires(backends=("sentencepiece",))
class PLBartTokenizer(PreTrainedTokenizer):
"""
Construct a PLBART tokenizer.
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
<tokens> <eos>` for target language documents.
Args:
vocab_file (`str`):
Path to the vocabulary file.
src_lang (`str`, *optional*):
A string representing the source language.
tgt_lang (`str`, *optional*):
A string representing the target language.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The start of sequence token.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The cls token, which is a special token used as the first token for all tasks.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masking tasks. This
is only used in the `"base"` tokenizer type. For `"multi"` tokenizer, masking is never done for the
downstream tasks.
language_codes (`str`, *optional*, defaults to `"base"`):
What language codes to use. Should be one of `"base"` or `"multi"`.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from transformers import PLBartTokenizer
>>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
>>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
>>> expected_translation_english = "Returns the maximum value of a b c."
>>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt")
```"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: list[int] = []
suffix_tokens: list[int] = []
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
language_codes="base",
tokenizer_file=None,
src_lang=None,
tgt_lang=None,
sp_model_kwargs: Optional[dict[str, Any]] = None,
additional_special_tokens=None,
clean_up_tokenization_spaces=True,
**kwargs,
):
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
src_lang = self._convert_lang_code_special_format(src_lang)
tgt_lang = self._convert_lang_code_special_format(tgt_lang)
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
self.language_codes = language_codes
fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes]
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset = 1
self.sp_model_size = len(self.sp_model)
self.lang_code_to_id = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(fairseq_language_codes)
}
self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
if self.language_codes == "base":
self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_additional_special_tokens = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens]
)
if self.language_codes == "base":
self._src_lang = src_lang
self.cur_lang_code_id = (
self.lang_code_to_id[self._src_lang] if self._src_lang is not None else self._src_lang
)
else:
self._src_lang = src_lang if src_lang is not None else "__en_XX__"
self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
language_codes=language_codes,
tokenizer_file=tokenizer_file,
src_lang=src_lang,
tgt_lang=tgt_lang,
additional_special_tokens=_additional_special_tokens,
sp_model_kwargs=self.sp_model_kwargs,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
state["sp_model_proto"] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, d):
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def vocab_size(self):
if self.language_codes == "base":
return (
len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1
) # Plus 1 for the mask token
else:
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
new_src_lang = self._convert_lang_code_special_format(new_src_lang)
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A PLBART sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `X [eos, src_lang_code]`
- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
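# Illustrative sketch (editorial addition; ids are assumptions): with src_lang="python" the suffix tokens
# are [eos_token_id, lang_code_to_id["__python__"]] and the prefix is empty, so
#     tokenizer.build_inputs_with_special_tokens([t1, t2])   # -> [t1, t2, eos_id, python_code_id]
# which matches the `X [eos, src_lang_code]` layout described above.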
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _build_translation_inputs(
self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = self._convert_lang_code_special_format(src_lang)
self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(self.tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> list[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def prepare_seq2seq_batch(
self,
src_texts: list[str],
src_lang: str = "en_XX",
tgt_texts: Optional[list[str]] = None,
tgt_lang: str = "python",
**kwargs,
) -> BatchEncoding:
self.src_lang = self._convert_lang_code_special_format(src_lang)
self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
"""Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
src_lang = self._convert_lang_code_special_format(src_lang)
self.cur_lang_code = self.lang_code_to_id[src_lang] if src_lang is not None else None
self.prefix_tokens = []
if self.cur_lang_code is not None:
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, lang: str) -> None:
"""Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
lang = self._convert_lang_code_special_format(lang)
self.cur_lang_code = self.lang_code_to_id[lang] if lang is not None else None
self.prefix_tokens = []
if self.cur_lang_code is not None:
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.suffix_tokens = [self.eos_token_id]
def _convert_lang_code_special_format(self, lang: str) -> str:
"""Convert Language Codes to format tokenizer uses if required"""
lang = FAIRSEQ_LANGUAGE_CODES_MAP.get(lang, lang)
return lang
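# Editorial note: for example, _convert_lang_code_special_format("python") returns "__python__", while an
# already formatted code such as "__en_XX__" is not a key of FAIRSEQ_LANGUAGE_CODES_MAP and is therefore
# returned unchanged by the `.get(lang, lang)` fallback.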
__all__ = ["PLBartTokenizer"]
| transformers/src/transformers/models/plbart/tokenization_plbart.py/0 | {
"file_path": "transformers/src/transformers/models/plbart/tokenization_plbart.py",
"repo_id": "transformers",
"token_count": 8254
} | 528 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_prompt_depth_anything.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright 2025 The HuggingFace Team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class PromptDepthAnythingConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PromptDepthAnythingModel`]. It is used to instantiate a PromptDepthAnything
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the PromptDepthAnything
[LiheYoung/depth-anything-small-hf](https://huggingface.co/LiheYoung/depth-anything-small-hf) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone_config (`Union[dict[str, Any], PretrainedConfig]`, *optional*):
The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to
leverage the [`AutoBackbone`] API.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
API.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
patch_size (`int`, *optional*, defaults to 14):
The size of the patches to extract from the backbone features.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
reassemble_hidden_size (`int`, *optional*, defaults to 384):
The number of input channels of the reassemble layers.
reassemble_factors (`list[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`):
The up/downsampling factors of the reassemble layers.
neck_hidden_sizes (`list[str]`, *optional*, defaults to `[48, 96, 192, 384]`):
The hidden sizes to project to for the feature maps of the backbone.
fusion_hidden_size (`int`, *optional*, defaults to 64):
The number of channels before fusion.
head_in_index (`int`, *optional*, defaults to -1):
The index of the features to use in the depth estimation head.
head_hidden_size (`int`, *optional*, defaults to 32):
The number of output channels in the second convolution of the depth estimation head.
depth_estimation_type (`str`, *optional*, defaults to `"relative"`):
The type of depth estimation to use. Can be one of `["relative", "metric"]`.
max_depth (`float`, *optional*):
The maximum depth to use for the "metric" depth estimation head. 20 should be used for indoor models
and 80 for outdoor models. For "relative" depth estimation, this value is ignored.
Example:
```python
>>> from transformers import PromptDepthAnythingConfig, PromptDepthAnythingForDepthEstimation
>>> # Initializing a PromptDepthAnything small style configuration
>>> configuration = PromptDepthAnythingConfig()
>>> # Initializing a model from the PromptDepthAnything small style configuration
>>> model = PromptDepthAnythingForDepthEstimation(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "prompt_depth_anything"
def __init__(
self,
backbone_config=None,
backbone=None,
use_pretrained_backbone=False,
use_timm_backbone=False,
backbone_kwargs=None,
patch_size=14,
initializer_range=0.02,
reassemble_hidden_size=384,
reassemble_factors=[4, 2, 1, 0.5],
neck_hidden_sizes=[48, 96, 192, 384],
fusion_hidden_size=64,
head_in_index=-1,
head_hidden_size=32,
depth_estimation_type="relative",
max_depth=None,
**kwargs,
):
super().__init__(**kwargs)
if backbone_config is None and backbone is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Dinov2` backbone.")
backbone_config = CONFIG_MAPPING["dinov2"](
image_size=518,
hidden_size=384,
num_attention_heads=6,
out_indices=[9, 10, 11, 12],
apply_layernorm=True,
reshape_hidden_states=False,
)
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.reassemble_hidden_size = reassemble_hidden_size
self.patch_size = patch_size
self.initializer_range = initializer_range
self.reassemble_factors = reassemble_factors
self.neck_hidden_sizes = neck_hidden_sizes
self.fusion_hidden_size = fusion_hidden_size
self.head_in_index = head_in_index
self.head_hidden_size = head_hidden_size
if depth_estimation_type not in ["relative", "metric"]:
raise ValueError("depth_estimation_type must be one of ['relative', 'metric']")
self.depth_estimation_type = depth_estimation_type
self.max_depth = max_depth if max_depth else 1
@property
def sub_configs(self):
return (
{"backbone_config": type(self.backbone_config)}
if getattr(self, "backbone_config", None) is not None
else {}
)
def to_dict(self):
"""
Serializes this instance to a Python dictionary, overriding the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
output["backbone_config"] = self.backbone_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
__all__ = ["PromptDepthAnythingConfig"]
| transformers/src/transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py/0 | {
"file_path": "transformers/src/transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py",
"repo_id": "transformers",
"token_count": 3566
} | 529 |
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Qwen2-VL.
"""
from typing import Optional, Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import logging
from ...video_utils import VideoInput
logger = logging.get_logger(__name__)
class Qwen2VLImagesKwargs(ImagesKwargs):
min_pixels: Optional[int]
max_pixels: Optional[int]
patch_size: Optional[int]
temporal_patch_size: Optional[int]
merge_size: Optional[int]
class Qwen2VLProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: Qwen2VLImagesKwargs
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
}
class Qwen2VLProcessor(ProcessorMixin):
r"""
Constructs a Qwen2-VL processor which wraps a Qwen2-VL image processor and a Qwen2 tokenizer into a single processor.
[`Qwen2VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
[`~Qwen2VLProcessor.__call__`] and [`~Qwen2VLProcessor.decode`] for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`Qwen2VLVideoProcessor`], *optional*):
The video processor is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
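    Example:
    A minimal usage sketch (added by the editor, not part of the original documentation); the checkpoint name
    `"Qwen/Qwen2-VL-2B-Instruct"` and the sample image URL are illustrative assumptions.
    ```python
    >>> import requests
    >>> from PIL import Image
    >>> from transformers import Qwen2VLProcessor
    >>> processor = Qwen2VLProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
    >>> image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    >>> inputs = processor(images=image, text="<|image_pad|> Describe this image.", return_tensors="pt")
    ```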
"""
attributes = ["image_processor", "tokenizer", "video_processor"]
image_processor_class = "AutoImageProcessor"
video_processor_class = "AutoVideoProcessor"
tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
self.video_token_id = (
tokenizer.video_token_id
if getattr(tokenizer, "video_token_id", None)
else tokenizer.convert_tokens_to_ids(self.video_token)
)
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
videos: VideoInput = None,
**kwargs: Unpack[Qwen2VLProcessorKwargs],
) -> BatchFeature:
"""
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
        Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of 3D grids (temporal, height, width) for each image in the LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of 3D grids (temporal, height, width) for each video in the LLM. Returned when `videos` is not `None`.
"""
output_kwargs = self._merge_kwargs(
Qwen2VLProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
image_inputs = videos_inputs = {}
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
video_grid_thw = videos_inputs["video_grid_thw"]
if not isinstance(text, list):
text = [text]
text = text.copy() # below lines change text in-place
if images is not None:
merge_length = self.image_processor.merge_size**2
index = 0
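            # Expand each image token one occurrence at a time: the number of tokens an image needs is its grid
            # volume divided by merge_size**2. A temporary <|placeholder|> marker is used so the `while` loop does
            # not re-match the tokens that were just inserted; it is swapped back to the image token afterwards.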
for i in range(len(text)):
while self.image_token in text[i]:
num_image_tokens = image_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
if videos is not None:
merge_length = self.video_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.video_token in text[i]:
num_video_tokens = video_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.video_token, "<|placeholder|>" * num_video_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.video_token)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
if return_mm_token_type_ids:
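            # Mark the positions of image placeholder tokens with type id 1 so downstream code can tell vision
            # token positions apart from text token positions.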
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = Qwen2VLProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
if video_sizes is not None:
videos_kwargs = Qwen2VLProcessorKwargs._defaults.get("videos_kwargs", {})
            videos_kwargs.update(kwargs)
            merge_size = videos_kwargs.get("merge_size", None) or self.video_processor.merge_size
num_video_patches = [
self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
for video_size in video_sizes
]
num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches]
vision_data["num_video_tokens"] = num_video_tokens
return MultiModalData(**vision_data)
def post_process_image_text_to_text(
self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode` method.
Returns:
`list[str]`: The decoded text.
"""
return self.tokenizer.batch_decode(
generated_outputs,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
__all__ = ["Qwen2VLProcessor"]
| transformers/src/transformers/models/qwen2_vl/processing_qwen2_vl.py/0 | {
"file_path": "transformers/src/transformers/models/qwen2_vl/processing_qwen2_vl.py",
"repo_id": "transformers",
"token_count": 5350
} | 530 |
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow RegNet model."""
from typing import Optional, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import (
TFPreTrainedModel,
TFSequenceClassificationLoss,
keras,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
class TFRegNetConvLayer(keras.layers.Layer):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: int = 1,
groups: int = 1,
activation: Optional[str] = "relu",
**kwargs,
):
super().__init__(**kwargs)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
self.padding = keras.layers.ZeroPadding2D(padding=kernel_size // 2)
self.convolution = keras.layers.Conv2D(
filters=out_channels,
kernel_size=kernel_size,
strides=stride,
padding="VALID",
groups=groups,
use_bias=False,
name="convolution",
)
self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
self.activation = ACT2FN[activation] if activation is not None else tf.identity
self.in_channels = in_channels
self.out_channels = out_channels
def call(self, hidden_state):
hidden_state = self.convolution(self.padding(hidden_state))
hidden_state = self.normalization(hidden_state)
hidden_state = self.activation(hidden_state)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "convolution", None) is not None:
with tf.name_scope(self.convolution.name):
self.convolution.build([None, None, None, self.in_channels])
if getattr(self, "normalization", None) is not None:
with tf.name_scope(self.normalization.name):
self.normalization.build([None, None, None, self.out_channels])
class TFRegNetEmbeddings(keras.layers.Layer):
"""
RegNet Embeddings (stem) composed of a single aggressive convolution.
"""
def __init__(self, config: RegNetConfig, **kwargs):
super().__init__(**kwargs)
self.num_channels = config.num_channels
self.embedder = TFRegNetConvLayer(
in_channels=config.num_channels,
out_channels=config.embedding_size,
kernel_size=3,
stride=2,
activation=config.hidden_act,
name="embedder",
)
def call(self, pixel_values):
num_channels = shape_list(pixel_values)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
# When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
hidden_state = self.embedder(pixel_values)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "embedder", None) is not None:
with tf.name_scope(self.embedder.name):
self.embedder.build(None)
class TFRegNetShortCut(keras.layers.Layer):
"""
RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
downsample the input using `stride=2`.
"""
def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs):
super().__init__(**kwargs)
self.convolution = keras.layers.Conv2D(
filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
)
self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
self.in_channels = in_channels
self.out_channels = out_channels
def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
return self.normalization(self.convolution(inputs), training=training)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "convolution", None) is not None:
with tf.name_scope(self.convolution.name):
self.convolution.build([None, None, None, self.in_channels])
if getattr(self, "normalization", None) is not None:
with tf.name_scope(self.normalization.name):
self.normalization.build([None, None, None, self.out_channels])
class TFRegNetSELayer(keras.layers.Layer):
"""
Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://huggingface.co/papers/1709.01507).
"""
def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
super().__init__(**kwargs)
self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
self.attention = [
keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
]
self.in_channels = in_channels
self.reduced_channels = reduced_channels
def call(self, hidden_state):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
pooled = self.pooler(hidden_state)
for layer_module in self.attention:
pooled = layer_module(pooled)
hidden_state = hidden_state * pooled
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "pooler", None) is not None:
with tf.name_scope(self.pooler.name):
self.pooler.build((None, None, None, None))
if getattr(self, "attention", None) is not None:
with tf.name_scope(self.attention[0].name):
self.attention[0].build([None, None, None, self.in_channels])
with tf.name_scope(self.attention[1].name):
self.attention[1].build([None, None, None, self.reduced_channels])
class TFRegNetXLayer(keras.layers.Layer):
"""
    RegNet's X layer: a `1x1`, a grouped `3x3`, and a `1x1` convolution, equivalent to a ResNet bottleneck layer with reduction = 1.
"""
def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
super().__init__(**kwargs)
should_apply_shortcut = in_channels != out_channels or stride != 1
groups = max(1, out_channels // config.groups_width)
self.shortcut = (
TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut")
if should_apply_shortcut
else keras.layers.Activation("linear", name="shortcut")
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
self.layers = [
TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
TFRegNetConvLayer(
out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
),
TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.2"),
]
self.activation = ACT2FN[config.hidden_act]
def call(self, hidden_state):
residual = hidden_state
for layer_module in self.layers:
hidden_state = layer_module(hidden_state)
residual = self.shortcut(residual)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "shortcut", None) is not None:
with tf.name_scope(self.shortcut.name):
self.shortcut.build(None)
if getattr(self, "layers", None) is not None:
for layer in self.layers:
with tf.name_scope(layer.name):
layer.build(None)
class TFRegNetYLayer(keras.layers.Layer):
"""
RegNet's Y layer: an X layer with Squeeze and Excitation.
"""
def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
super().__init__(**kwargs)
should_apply_shortcut = in_channels != out_channels or stride != 1
groups = max(1, out_channels // config.groups_width)
self.shortcut = (
TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut")
if should_apply_shortcut
else keras.layers.Activation("linear", name="shortcut")
)
self.layers = [
TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
TFRegNetConvLayer(
out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
),
TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.3"),
]
self.activation = ACT2FN[config.hidden_act]
def call(self, hidden_state):
residual = hidden_state
for layer_module in self.layers:
hidden_state = layer_module(hidden_state)
residual = self.shortcut(residual)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "shortcut", None) is not None:
with tf.name_scope(self.shortcut.name):
self.shortcut.build(None)
if getattr(self, "layers", None) is not None:
for layer in self.layers:
with tf.name_scope(layer.name):
layer.build(None)
class TFRegNetStage(keras.layers.Layer):
"""
A RegNet stage composed by stacked layers.
"""
def __init__(
self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
):
super().__init__(**kwargs)
layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
self.layers = [
# downsampling is done in the first layer with stride of 2
layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
*[layer(config, out_channels, out_channels, name=f"layers.{i + 1}") for i in range(depth - 1)],
]
def call(self, hidden_state):
for layer_module in self.layers:
hidden_state = layer_module(hidden_state)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "layers", None) is not None:
for layer in self.layers:
with tf.name_scope(layer.name):
layer.build(None)
class TFRegNetEncoder(keras.layers.Layer):
def __init__(self, config: RegNetConfig, **kwargs):
super().__init__(**kwargs)
self.stages = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
config,
config.embedding_size,
config.hidden_sizes[0],
stride=2 if config.downsample_in_first_stage else 1,
depth=config.depths[0],
name="stages.0",
)
)
in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i + 1}"))
def call(
self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
) -> TFBaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
def build(self, input_shape=None):
if self.built:
return
self.built = True
for stage in self.stages:
with tf.name_scope(stage.name):
stage.build(None)
@keras_serializable
class TFRegNetMainLayer(keras.layers.Layer):
config_class = RegNetConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.config = config
self.embedder = TFRegNetEmbeddings(config, name="embedder")
self.encoder = TFRegNetEncoder(config, name="encoder")
self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
@unpack_inputs
def call(
self,
pixel_values: tf.Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> TFBaseModelOutputWithPoolingAndNoAttention:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output = self.embedder(pixel_values, training=training)
encoder_outputs = self.encoder(
embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
)
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity across the modules
pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
hidden_states = tuple(tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "embedder", None) is not None:
with tf.name_scope(self.embedder.name):
self.embedder.build(None)
if getattr(self, "encoder", None) is not None:
with tf.name_scope(self.encoder.name):
self.encoder.build(None)
if getattr(self, "pooler", None) is not None:
with tf.name_scope(self.pooler.name):
self.pooler.build((None, None, None, None))
class TFRegNetPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RegNetConfig
base_model_prefix = "regnet"
main_input_name = "pixel_values"
@property
def input_signature(self):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
This model is a Tensorflow
[keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.",
REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__(self, config: RegNetConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.regnet = TFRegNetMainLayer(config, name="regnet")
@unpack_inputs
@add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutputWithPoolingAndNoAttention,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def call(
self,
pixel_values: tf.Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, tuple[tf.Tensor]]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.regnet(
pixel_values=pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state,
pooler_output=outputs.pooler_output,
hidden_states=outputs.hidden_states,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "regnet", None) is not None:
with tf.name_scope(self.regnet.name):
self.regnet.build(None)
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""",
REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config: RegNetConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.regnet = TFRegNetMainLayer(config, name="regnet")
# classification head
self.classifier = [
keras.layers.Flatten(),
keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
def call(
self,
pixel_values: Optional[tf.Tensor] = None,
labels: Optional[tf.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[TFSequenceClassifierOutput, tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.regnet(
pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
flattened_output = self.classifier[0](pooled_output)
logits = self.classifier[1](flattened_output)
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "regnet", None) is not None:
with tf.name_scope(self.regnet.name):
self.regnet.build(None)
if getattr(self, "classifier", None) is not None:
with tf.name_scope(self.classifier[1].name):
self.classifier[1].build([None, None, None, self.config.hidden_sizes[-1]])
__all__ = ["TFRegNetForImageClassification", "TFRegNetModel", "TFRegNetPreTrainedModel"]
| transformers/src/transformers/models/regnet/modeling_tf_regnet.py/0 | {
"file_path": "transformers/src/transformers/models/regnet/modeling_tf_regnet.py",
"repo_id": "transformers",
"token_count": 10447
} | 531 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert RoBERTa checkpoint."""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_roberta_checkpoint_to_pytorch(
roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
"""
Copy/paste/tweak roberta's weights to our BERT structure.
"""
roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
roberta.eval() # disable dropout
roberta_sent_encoder = roberta.model.encoder.sentence_encoder
config = RobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
hidden_size=roberta.args.encoder_embed_dim,
num_hidden_layers=roberta.args.encoder_layers,
num_attention_heads=roberta.args.encoder_attention_heads,
intermediate_size=roberta.args.encoder_ffn_embed_dim,
max_position_embeddings=514,
type_vocab_size=1,
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
if classification_head:
config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our BERT config:", config)
model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight
) # just zero them out b/c RoBERTa doesn't use them.
model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
layer: BertLayer = model.roberta.encoder.layer[i]
roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
# self attention
self_attn: BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
)
self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
# self-attention output
self_output: BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
# intermediate
intermediate: BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
intermediate.dense.weight = roberta_layer.fc1.weight
intermediate.dense.bias = roberta_layer.fc1.bias
# output
bert_output: BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
bert_output.dense.weight = roberta_layer.fc2.weight
bert_output.dense.bias = roberta_layer.fc2.bias
bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
# end of layer
if classification_head:
model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
our_output = model(input_ids)[0]
if classification_head:
their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
else:
their_output = roberta.model(input_ids)[0]
print(our_output.shape, their_output.shape)
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
success = torch.allclose(our_output, their_output, atol=1e-3)
print("Do both models output the same tensors?", "🔥" if success else "💩")
if not success:
raise Exception("Something went wRoNg")
pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
print(f"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
args = parser.parse_args()
convert_roberta_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
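# Example invocation (a sketch added by the editor; the paths below are illustrative assumptions):
#   python convert_roberta_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/roberta.large \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --classification_head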
| transformers/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 3215
} | 532 |
# coding=utf-8
# Copyright 2024 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTorch RT-DETR specific ResNet model. The main difference from the Hugging Face ResNet model is that this RTDetrResNet model forces the use of a shortcut at the first layer in the ResNet-18/34 models.
See https://github.com/lyuwenyu/RT-DETR/blob/5b628eaa0a2fc25bdafec7e6148d5296b144af85/rtdetr_pytorch/src/nn/backbone/presnet.py#L126 for details.
"""
import math
from typing import Optional
from torch import Tensor, nn
from ...activations import ACT2FN
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...utils.backbone_utils import BackboneMixin
from .configuration_rt_detr_resnet import RTDetrResNetConfig
logger = logging.get_logger(__name__)
# Copied from transformers.models.resnet.modeling_resnet.ResNetConvLayer -> RTDetrResNetConvLayer
class RTDetrResNetConvLayer(nn.Module):
def __init__(
self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
):
super().__init__()
self.convolution = nn.Conv2d(
in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
)
self.normalization = nn.BatchNorm2d(out_channels)
self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
def forward(self, input: Tensor) -> Tensor:
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
hidden_state = self.activation(hidden_state)
return hidden_state
class RTDetrResNetEmbeddings(nn.Module):
"""
ResNet Embeddings (stem) composed of a deep aggressive convolution.
"""
def __init__(self, config: RTDetrResNetConfig):
super().__init__()
self.embedder = nn.Sequential(
*[
RTDetrResNetConvLayer(
config.num_channels,
config.embedding_size // 2,
kernel_size=3,
stride=2,
activation=config.hidden_act,
),
RTDetrResNetConvLayer(
config.embedding_size // 2,
config.embedding_size // 2,
kernel_size=3,
stride=1,
activation=config.hidden_act,
),
RTDetrResNetConvLayer(
config.embedding_size // 2,
config.embedding_size,
kernel_size=3,
stride=1,
activation=config.hidden_act,
),
]
)
self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.num_channels = config.num_channels
def forward(self, pixel_values: Tensor) -> Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embedding = self.embedder(pixel_values)
embedding = self.pooler(embedding)
return embedding
# Copied from transformers.models.resnet.modeling_resnet.ResNetShortCut -> RTDetrResNetChortCut
class RTDetrResNetShortCut(nn.Module):
"""
ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
downsample the input using `stride=2`.
"""
def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
super().__init__()
self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
self.normalization = nn.BatchNorm2d(out_channels)
def forward(self, input: Tensor) -> Tensor:
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
return hidden_state
class RTDetrResNetBasicLayer(nn.Module):
"""
    A classic ResNet residual layer composed of two `3x3` convolutions.
See https://github.com/lyuwenyu/RT-DETR/blob/5b628eaa0a2fc25bdafec7e6148d5296b144af85/rtdetr_pytorch/src/nn/backbone/presnet.py#L34.
"""
def __init__(
self,
config: RTDetrResNetConfig,
in_channels: int,
out_channels: int,
stride: int = 1,
should_apply_shortcut: bool = False,
):
super().__init__()
if in_channels != out_channels:
self.shortcut = (
nn.Sequential(
*[nn.AvgPool2d(2, 2, 0, ceil_mode=True), RTDetrResNetShortCut(in_channels, out_channels, stride=1)]
)
if should_apply_shortcut
else nn.Identity()
)
else:
self.shortcut = (
RTDetrResNetShortCut(in_channels, out_channels, stride=stride)
if should_apply_shortcut
else nn.Identity()
)
self.layer = nn.Sequential(
RTDetrResNetConvLayer(in_channels, out_channels, stride=stride),
RTDetrResNetConvLayer(out_channels, out_channels, activation=None),
)
self.activation = ACT2FN[config.hidden_act]
def forward(self, hidden_state):
residual = hidden_state
hidden_state = self.layer(hidden_state)
residual = self.shortcut(residual)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
class RTDetrResNetBottleNeckLayer(nn.Module):
"""
    A classic RTDetrResNet bottleneck layer composed of a `1x1`, a `3x3`, and a `1x1` convolution.
    The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3`
    convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`. If
    `downsample_in_bottleneck` is true, downsampling is done in the first layer instead of the second layer.
"""
def __init__(
self,
config: RTDetrResNetConfig,
in_channels: int,
out_channels: int,
stride: int = 1,
):
super().__init__()
reduction = 4
should_apply_shortcut = in_channels != out_channels or stride != 1
reduces_channels = out_channels // reduction
if stride == 2:
self.shortcut = nn.Sequential(
*[
nn.AvgPool2d(2, 2, 0, ceil_mode=True),
RTDetrResNetShortCut(in_channels, out_channels, stride=1)
if should_apply_shortcut
else nn.Identity(),
]
)
else:
self.shortcut = (
RTDetrResNetShortCut(in_channels, out_channels, stride=stride)
if should_apply_shortcut
else nn.Identity()
)
self.layer = nn.Sequential(
RTDetrResNetConvLayer(
in_channels, reduces_channels, kernel_size=1, stride=stride if config.downsample_in_bottleneck else 1
),
RTDetrResNetConvLayer(
reduces_channels, reduces_channels, stride=stride if not config.downsample_in_bottleneck else 1
),
RTDetrResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
)
self.activation = ACT2FN[config.hidden_act]
def forward(self, hidden_state):
residual = hidden_state
hidden_state = self.layer(hidden_state)
residual = self.shortcut(residual)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
class RTDetrResNetStage(nn.Module):
"""
A RTDetrResNet stage composed by stacked layers.
"""
def __init__(
self,
config: RTDetrResNetConfig,
in_channels: int,
out_channels: int,
stride: int = 2,
depth: int = 2,
):
super().__init__()
layer = RTDetrResNetBottleNeckLayer if config.layer_type == "bottleneck" else RTDetrResNetBasicLayer
if config.layer_type == "bottleneck":
first_layer = layer(
config,
in_channels,
out_channels,
stride=stride,
)
else:
first_layer = layer(config, in_channels, out_channels, stride=stride, should_apply_shortcut=True)
self.layers = nn.Sequential(
first_layer, *[layer(config, out_channels, out_channels) for _ in range(depth - 1)]
)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
# Copied from transformers.models.resnet.modeling_resnet.ResNetEncoder with ResNet->RTDetrResNet
class RTDetrResNetEncoder(nn.Module):
def __init__(self, config: RTDetrResNetConfig):
super().__init__()
self.stages = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
RTDetrResNetStage(
config,
config.embedding_size,
config.hidden_sizes[0],
stride=2 if config.downsample_in_first_stage else 1,
depth=config.depths[0],
)
)
in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
self.stages.append(RTDetrResNetStage(config, in_channels, out_channels, depth=depth))
def forward(
self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
) -> BaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_state,
hidden_states=hidden_states,
)
@auto_docstring
# Copied from transformers.models.resnet.modeling_resnet.ResNetPreTrainedModel with ResNet->RTDetrResNet
class RTDetrResNetPreTrainedModel(PreTrainedModel):
config: RTDetrResNetConfig
base_model_prefix = "resnet"
main_input_name = "pixel_values"
_no_split_modules = ["RTDetrResNetConvLayer", "RTDetrResNetShortCut"]
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
# copied from the `reset_parameters` method of `class Linear(Module)` in `torch`.
elif isinstance(module, nn.Linear):
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
if module.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(module.bias, -bound, bound)
elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
@auto_docstring(
custom_intro="""
ResNet backbone, to be used with frameworks like RTDETR.
"""
)
class RTDetrResNetBackbone(RTDetrResNetPreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embedding_size] + config.hidden_sizes
self.embedder = RTDetrResNetEmbeddings(config)
self.encoder = RTDetrResNetEncoder(config)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import RTDetrResNetConfig, RTDetrResNetBackbone
>>> import torch
>>> config = RTDetrResNetConfig()
>>> model = RTDetrResNetBackbone(config)
>>> pixel_values = torch.randn(1, 3, 224, 224)
>>> with torch.no_grad():
... outputs = model(pixel_values)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 2048, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embedder(pixel_values)
outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
hidden_states = outputs.hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
__all__ = [
"RTDetrResNetBackbone",
"RTDetrResNetPreTrainedModel",
]
| transformers/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py/0 | {
"file_path": "transformers/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py",
"repo_id": "transformers",
"token_count": 6876
} | 533 |
# coding=utf-8
# Copyright 2023 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SAM model."""
import collections
from dataclasses import dataclass
from typing import Callable, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from transformers.utils.generic import OutputRecorder, TransformersKwargs, check_model_inputs
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
ModelOutput,
auto_docstring,
logging,
)
from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
Base class for sam vision model's outputs that also contains image embeddings obtained by applying the projection
layer to the pooler_output.
"""
)
class SamVisionEncoderOutput(ModelOutput):
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Segment-Anything model's output
"""
)
class SamImageSegmentationOutput(ModelOutput):
r"""
iou_scores (`torch.FloatTensor` of shape `(batch_size, num_masks)`):
The iou scores of the predicted masks.
pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`):
The predicted low resolutions masks. Needs to be post-processed by the processor
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
iou_scores: Optional[torch.FloatTensor] = None
pred_masks: Optional[torch.FloatTensor] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
class SamPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values):
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
)
embeddings = self.projection(pixel_values).permute(0, 2, 3, 1)
return embeddings
class SamMLPBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim)
self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size)
self.act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.lin1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.lin2(hidden_states)
return hidden_states
# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->Sam
class SamLayerNorm(nn.Module):
r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError(f"Unsupported data format: {self.data_format}")
self.normalized_shape = (normalized_shape,)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.data_format == "channels_last":
x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
input_dtype = x.dtype
x = x.float()
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = x.to(dtype=input_dtype)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
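# Illustrative reference (not used by the model): the channels_first branch above matches permuting to
# channels_last, applying the standard layer_norm, and permuting back (up to the intermediate float32 cast).
# A minimal sketch of that equivalence:
def _channels_first_layer_norm_reference(x, weight, bias, eps=1e-6):
    # x: (batch_size, channels, height, width); weight and bias: (channels,)
    x = x.permute(0, 2, 3, 1)
    x = torch.nn.functional.layer_norm(x, (x.shape[-1],), weight, bias, eps)
    return x.permute(0, 3, 1, 2)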
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
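# Shape sketch (illustrative): with query/key/value of shape (batch, num_heads, seq_len, head_dim),
# `attn_weights` is (batch, num_heads, q_len, k_len) and the returned `attn_output` is
# (batch, q_len, num_heads, head_dim) after the final transpose, ready to be recombined by the caller.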
class SamAttention(nn.Module):
"""
SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
values.
"""
def __init__(self, config, downsample_rate=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate
self.internal_dim = config.hidden_size // downsample_rate
self.num_attention_heads = config.num_attention_heads
if self.internal_dim % config.num_attention_heads != 0:
raise ValueError("num_attention_heads must divide hidden_size.")
self.scaling = (self.internal_dim // config.num_attention_heads) ** -0.5
self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.k_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.v_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.out_proj = nn.Linear(self.internal_dim, self.hidden_size)
self.is_causal = False
def _separate_heads(self, hidden_states: Tensor, num_attention_heads: int) -> Tensor:
batch, point_batch_size, n_tokens, channel = hidden_states.shape
c_per_head = channel // num_attention_heads
hidden_states = hidden_states.reshape(batch * point_batch_size, n_tokens, num_attention_heads, c_per_head)
return hidden_states.transpose(1, 2)
def _recombine_heads(self, hidden_states: Tensor, point_batch_size: int) -> Tensor:
batch, n_tokens, n_heads, c_per_head = hidden_states.shape
return hidden_states.reshape(batch // point_batch_size, point_batch_size, n_tokens, n_heads * c_per_head)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
attention_similarity: Optional[Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Tensor:
# Input projections
query = self.q_proj(query)
key = self.k_proj(key)
value = self.v_proj(value)
point_batch_size = query.shape[1]
# Separate into heads
query = self._separate_heads(query, self.num_attention_heads)
key = self._separate_heads(key, self.num_attention_heads)
value = self._separate_heads(value, self.num_attention_heads)
# SamAttention
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=attention_similarity,
dropout=0.0 if not self.training else self.dropout_p,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = self._recombine_heads(attn_output, point_batch_size)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
class SamTwoWayAttentionBlock(nn.Module):
def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False):
"""
A transformer block with four layers:
(1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on
sparse inputs (4) cross attention of dense inputs -> sparse inputs
Arguments:
config (`SamMaskDecoderConfig`):
The configuration file used to instantiate the block
attention_downsample_rate (`int`, *optional*, defaults to 2):
The downsample ratio of the block used to reduce the inner dim of the attention.
skip_first_layer_pe (`bool`, *optional*, defaults to `False`):
Whether or not to skip the addition of the query_point_embedding on the first layer.
"""
super().__init__()
self.hidden_size = config.hidden_size
self.layer_norm_eps = config.layer_norm_eps
self.self_attn = SamAttention(config, downsample_rate=1)
self.layer_norm1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
self.cross_attn_token_to_image = SamAttention(config, downsample_rate=attention_downsample_rate)
self.layer_norm2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
self.mlp = SamMLPBlock(config)
self.layer_norm3 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
self.layer_norm4 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
self.cross_attn_image_to_token = SamAttention(config, downsample_rate=attention_downsample_rate)
self.skip_first_layer_pe = skip_first_layer_pe
def forward(
self,
queries: Tensor,
keys: Tensor,
query_point_embedding: Tensor,
key_point_embedding: Tensor,
attention_similarity: Tensor,
**kwargs: Unpack[TransformersKwargs],
):
# Self attention block
if self.skip_first_layer_pe:
queries, _ = self.self_attn(query=queries, key=queries, value=queries)
else:
query = queries + query_point_embedding
attn_out, _ = self.self_attn(query=query, key=query, value=queries)
queries = queries + attn_out
queries = self.layer_norm1(queries)
# Cross attention block, tokens attending to image embedding
query = queries + query_point_embedding
key = keys + key_point_embedding
attn_out, _ = self.cross_attn_token_to_image(
query=query, key=key, value=keys, attention_similarity=attention_similarity
)
queries = queries + attn_out
queries = self.layer_norm2(queries)
# MLP block
mlp_out = self.mlp(queries)
queries = queries + mlp_out
queries = self.layer_norm3(queries)
# Cross attention block, image embedding attending to tokens
query = queries + query_point_embedding
key = keys + key_point_embedding
attn_out, _ = self.cross_attn_image_to_token(query=key, key=query, value=queries)
keys = keys + attn_out
keys = self.layer_norm4(keys)
return queries, keys, attn_out
class SamTwoWayTransformer(nn.Module):
def __init__(self, config: SamMaskDecoderConfig):
super().__init__()
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.layers = nn.ModuleList()
for i in range(self.num_hidden_layers):
self.layers.append(SamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0)))
self.final_attn_token_to_image = SamAttention(config)
self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)
def forward(
self,
point_embeddings: Tensor,
image_embeddings: Tensor,
image_positional_embeddings: Tensor,
attention_similarity: Tensor,
target_embedding=None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
if image_embeddings is None:
raise ValueError("You have to specify an image_embedding")
image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
# Prepare queries
queries = point_embeddings
keys = image_embeddings
# Apply transformer blocks and final layernorm
for layer in self.layers:
if target_embedding is not None:
queries += target_embedding
queries, keys, _ = layer(
queries=queries,
keys=keys,
query_point_embedding=point_embeddings,
key_point_embedding=image_positional_embeddings,
attention_similarity=attention_similarity,
**kwargs,
)
# Apply the final attention layer from the points to the image
query = queries + point_embeddings
key = keys + image_positional_embeddings
attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys)
queries = queries + attn_out
queries = self.layer_norm_final_attn(queries)
return queries, keys
class SamFeedForward(nn.Module):
def __init__(
self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False
):
super().__init__()
self.num_layers = num_layers
self.activation = nn.ReLU()
self.proj_in = nn.Linear(input_dim, hidden_dim)
self.proj_out = nn.Linear(hidden_dim, output_dim)
self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)])
self.sigmoid_output = sigmoid_output
def forward(self, hidden_states):
hidden_states = self.proj_in(hidden_states)
hidden_states = self.activation(hidden_states)
for layer in self.layers:
hidden_states = self.activation(layer(hidden_states))
hidden_states = self.proj_out(hidden_states)
if self.sigmoid_output:
hidden_states = F.sigmoid(hidden_states)
return hidden_states
class SamMaskDecoder(nn.Module):
def __init__(self, config: SamMaskDecoderConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_multimask_outputs = config.num_multimask_outputs
self.num_mask_tokens = config.num_multimask_outputs + 1
self.iou_token = nn.Embedding(1, self.hidden_size)
self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size)
self.transformer = SamTwoWayTransformer(config)
# should we create a new class for this?
self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
self.upscale_layer_norm = SamLayerNorm(self.hidden_size // 4, data_format="channels_first")
self.activation = nn.GELU()
mlps_list = []
for _ in range(self.num_mask_tokens):
mlps_list += [SamFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)]
self.output_hypernetworks_mlps = nn.ModuleList(mlps_list)
self.iou_prediction_head = SamFeedForward(
self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth
)
def forward(
self,
image_embeddings: torch.Tensor,
image_positional_embeddings: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
attention_similarity: Optional[torch.Tensor] = None,
target_embedding: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Args:
image_embeddings (`torch.Tensor`):
the embeddings from the image encoder
image_positional_embeddings (`torch.Tensor`):
positional encoding with the shape of image_embeddings
sparse_prompt_embeddings (`torch.Tensor`):
The embeddings of the points and boxes
dense_prompt_embeddings (`torch.Tensor`):
the embeddings of the mask inputs
multimask_output (bool):
Whether to return multiple masks or a single mask.
"""
batch_size, num_channels, height, width = image_embeddings.shape
point_batch_size = sparse_prompt_embeddings.shape[1] if sparse_prompt_embeddings is not None else 1
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)
if sparse_prompt_embeddings is not None:
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2)
else:
tokens = output_tokens
point_embeddings = tokens.to(self.iou_token.weight.dtype)
# Expand per-image data in batch direction to be per-point
image_embeddings = image_embeddings + dense_prompt_embeddings
image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0)
image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)
# Run the transformer, image_positional_embedding are consumed
point_embedding, image_embeddings = self.transformer(
point_embeddings=point_embeddings,
image_embeddings=image_embeddings,
image_positional_embeddings=image_positional_embeddings,
attention_similarity=attention_similarity,
target_embedding=target_embedding,
)
iou_token_out = point_embedding[:, :, 0, :]
mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
image_embeddings = image_embeddings.transpose(2, 3).reshape(
batch_size * point_batch_size, num_channels, height, width
)
upscaled_embedding = self.upscale_conv1(image_embeddings)
upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding))
hyper_in_list = []
for i in range(self.num_mask_tokens):
current_mlp = self.output_hypernetworks_mlps[i]
hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]
hyper_in = torch.stack(hyper_in_list, dim=2)
_, num_channels, height, width = upscaled_embedding.shape
upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width)
masks = (hyper_in @ upscaled_embedding).reshape(batch_size, point_batch_size, -1, height, width)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
# Select the correct mask or masks for output
if multimask_output:
mask_slice = slice(1, None)
else:
mask_slice = slice(0, 1)
masks = masks[:, :, mask_slice, :, :]
iou_pred = iou_pred[:, :, mask_slice]
return masks, iou_pred
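# Note (illustrative): with the default `num_multimask_outputs=3`, the decoder predicts 4 candidate masks per
# prompt; `multimask_output=True` returns the last 3, while `multimask_output=False` keeps only the first
# channel, which corresponds to the single "best" mask.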
class SamPositionalEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.scale = config.hidden_size // 2
self.register_buffer("positional_embedding", self.scale * torch.randn((2, config.num_pos_feats)))
def forward(self, input_coords, input_shape=None):
"""Positionally encode points that are normalized to [0,1]."""
coordinates = input_coords.clone()
if input_shape is not None:
coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1]
coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0]
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coordinates = 2 * coordinates - 1
coordinates = coordinates.to(self.positional_embedding.dtype)
coordinates = coordinates @ self.positional_embedding
coordinates = 2 * np.pi * coordinates
# outputs d_1 x ... x d_n x channel shape
return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1)
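# Usage sketch (illustrative, shapes assumed): coordinates normalized to [0, 1] with shape (..., 2) are
# projected by the random (2, num_pos_feats) matrix and expanded with sin/cos, so the output has shape
# (..., 2 * num_pos_feats), e.g. (1, 1, 5, 2) -> (1, 1, 5, 2 * num_pos_feats).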
class SamMaskEmbedding(nn.Module):
def __init__(self, config: SamPromptEncoderConfig):
super().__init__()
self.mask_input_channels = config.mask_input_channels // 4
self.activation = ACT2FN[config.hidden_act]
self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1)
self.layer_norm1 = SamLayerNorm(
self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first"
)
self.layer_norm2 = SamLayerNorm(
self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first"
)
def forward(self, masks):
hidden_states = self.conv1(masks)
hidden_states = self.layer_norm1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.activation(hidden_states)
dense_embeddings = self.conv3(hidden_states)
return dense_embeddings
class SamPromptEncoder(nn.Module):
def __init__(self, config: SamConfig):
super().__init__()
self.shared_embedding = SamPositionalEmbedding(config.vision_config)
config = config.prompt_encoder_config
self.mask_embed = SamMaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size)
self.input_image_size = config.image_size
self.point_embed = nn.ModuleList(
[nn.Embedding(1, config.hidden_size) for i in range(config.num_point_embeddings)]
)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
target_point_shape = (points.shape[0], points.shape[1], 1, points.shape[-1])
target_labels_shape = (points.shape[0], points.shape[1], 1)
padding_point = torch.zeros(target_point_shape, device=points.device)
padding_label = -torch.ones(target_labels_shape, device=labels.device)
points = torch.cat([points, padding_point], dim=2)
labels = torch.cat([labels, padding_label], dim=2)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
# torch.where and expanding the labels tensor is required by the ONNX export
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
# This is required for the ONNX export. The dtype, device need to be explicitly
# specified as otherwise torch.onnx.export interprets as double
point_embedding = torch.where(labels[..., None] != -10, point_embedding, torch.zeros_like(point_embedding))
point_embedding = torch.where(
(labels == 0)[:, :, :, None],
point_embedding + self.point_embed[0].weight[None, None, :, :],
point_embedding,
)
point_embedding = torch.where(
(labels == 1)[:, :, :, None],
point_embedding + self.point_embed[1].weight[None, None, :, :],
point_embedding,
)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
batch_size, nb_boxes = boxes.shape[:2]
coords = boxes.reshape(batch_size, nb_boxes, 2, 2)
input_shape = (self.input_image_size, self.input_image_size)
corner_embedding = self.shared_embedding(coords, input_shape)
corner_embedding[:, :, 0, :] += self.point_embed[2].weight
corner_embedding[:, :, 1, :] += self.point_embed[3].weight
return corner_embedding
def forward(
self,
input_points: Optional[tuple[torch.Tensor, torch.Tensor]],
input_labels: Optional[torch.Tensor],
input_boxes: Optional[torch.Tensor],
input_masks: Optional[torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense embeddings.
Args:
points (`torch.Tensor`, *optional*):
point coordinates and labels to embed.
boxes (`torch.Tensor`, *optional*):
boxes to embed
masks (`torch.Tensor`, *optional*):
masks to embed
"""
sparse_embeddings = None
batch_size = 1
if input_points is not None:
batch_size = input_points.shape[0]
if input_labels is None:
raise ValueError("If points are provided, labels must also be provided.")
point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
sparse_embeddings = point_embeddings
if input_boxes is not None:
batch_size = input_boxes.shape[0]
box_embeddings = self._embed_boxes(input_boxes)
if sparse_embeddings is None:
sparse_embeddings = box_embeddings
else:
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
if input_masks is not None:
dense_embeddings = self.mask_embed(input_masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
class SamVisionAttention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(self, config, window_size):
super().__init__()
input_size = (
(config.image_size // config.patch_size, config.image_size // config.patch_size)
if window_size == 0
else (window_size, window_size)
)
self.num_attention_heads = config.num_attention_heads
head_dim = config.hidden_size // config.num_attention_heads
self.scale = head_dim**-0.5
self.dropout = config.attention_dropout
self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias)
self.proj = nn.Linear(config.hidden_size, config.hidden_size)
self.use_rel_pos = config.use_rel_pos
if self.use_rel_pos:
if input_size is None:
raise ValueError("Input size must be provided if using relative positional encoding.")
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int):
size of the query.
k_size (int):
size of key k.
rel_pos (`torch.Tensor`):
relative position embeddings (L, channel).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
def get_decomposed_rel_pos(
self,
query: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: tuple[int, int],
k_size: tuple[int, int],
) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
query (`torch.Tensor`):
query q in the attention layer with shape (batch_size, query_height * query_width, channel).
rel_pos_h (`torch.Tensor`):
relative position embeddings (Lh, channel) for height axis.
rel_pos_w (`torch.Tensor`):
relative position embeddings (Lw, channel) for width axis.
q_size (tuple):
spatial sequence size of query q with (query_height, query_width).
k_size (tuple):
spatial sequence size of key k with (key_height, key_width).
Returns:
decomposed_rel_pos (`torch.Tensor`):
decomposed relative position embeddings.
"""
query_height, query_width = q_size
key_height, key_width = k_size
relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h)
relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w)
batch_size, _, dim = query.shape
reshaped_query = query.reshape(batch_size, query_height, query_width, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height)
rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width)
decomposed_rel_pos = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
return decomposed_rel_pos
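# Shape note (illustrative): the returned tensor has shape
# (batch, query_height, query_width, key_height, key_width), where `batch` matches the leading dimension of
# `query`; callers flatten it to (batch, query_len, key_len) and add it to the attention weights as a bias.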
def forward(self, hidden_states: torch.Tensor, output_attentions=None) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, height, width, _ = hidden_states.shape
# qkv with shape (3, batch_size, nHead, height * width, channel)
qkv = (
self.qkv(hidden_states)
.reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
.permute(2, 0, 3, 1, 4)
)
# q, k, v with shape (batch_size * nHead, height * width, channel)
query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0)
attn_weights = (query * self.scale) @ key.transpose(-2, -1)
if self.use_rel_pos:
decomposed_rel_pos = self.get_decomposed_rel_pos(
query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
)
decomposed_rel_pos = decomposed_rel_pos.reshape_as(attn_weights)
attn_weights = attn_weights + decomposed_rel_pos
attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype)
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1)
attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1)
attn_output = self.proj(attn_output)
return attn_output, attn_weights
class SamVisionSdpaAttention(SamVisionAttention):
"""
Multi-head Attention block with relative position embeddings.
Using SDPA instead of the default attention.
"""
def __init__(self, config, window_size):
super().__init__(config, window_size)
def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor:
if output_attentions:
logger.warning_once(
"`SamVisionSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support "
"`output_attentions=True`. Falling back to the manual attention implementation, but "
"specifying the manual implementation will be required from Transformers version v5.0.0 onwards. "
'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
return super().forward(
hidden_states=hidden_states,
output_attentions=output_attentions,
)
batch_size, height, width, _ = hidden_states.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = (
self.qkv(hidden_states)
.reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
.permute(2, 0, 3, 1, 4)
)
# q, k, v with shape (B * nHead, H * W, C)
query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0)
attn_bias = None
if self.use_rel_pos:
decomposed_rel_pos = self.get_decomposed_rel_pos(
query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
)
decomposed_rel_pos = decomposed_rel_pos.reshape(
batch_size, self.num_attention_heads, height * width, height * width
)
attn_bias = decomposed_rel_pos
query = query.view(batch_size, self.num_attention_heads, height * width, -1)
key = key.view(batch_size, self.num_attention_heads, height * width, -1)
value = value.view(batch_size, self.num_attention_heads, height * width, -1)
attn_output = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attn_bias)
attn_output = (
attn_output.view(batch_size, self.num_attention_heads, height, width, -1)
.permute(0, 2, 3, 1, 4)
.reshape(batch_size, height, width, -1)
)
attn_output = self.proj(attn_output)
return attn_output, None
SAM_VISION_ATTENTION_CLASSES = {
"eager": SamVisionAttention,
"sdpa": SamVisionSdpaAttention,
}
class SamVisionLayer(GradientCheckpointingLayer):
def __init__(self, config, window_size):
super().__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attn = SAM_VISION_ATTENTION_CLASSES[config._attn_implementation](config, window_size)
self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = SamMLPBlock(config)
self.window_size = window_size
def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> tuple[torch.Tensor, tuple[int, int]]:
"""
Partition into non-overlapping windows with padding if needed.
Args:
hidden_states (`torch.Tensor`): input tokens with [batch_size, height, width, channel].
window_size (`int`): window size.
Returns:
windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].
(pad_height, pad_width): padded height and width before partition
"""
batch_size, height, width, channel = hidden_states.shape
pad_h = (window_size - height % window_size) % window_size
pad_w = (window_size - width % window_size) % window_size
hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))
pad_height, pad_width = height + pad_h, width + pad_w
hidden_states = hidden_states.reshape(
batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel
)
windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel)
return windows, (pad_height, pad_width)
def window_unpartition(
self, windows: torch.Tensor, window_size: int, padding_shape: tuple[int, int], original_shape: tuple[int, int]
) -> torch.Tensor:
"""
Window unpartition into original sequences and removing padding.
Args:
windows (`torch.Tensor`):
input tokens with [batch_size * num_windows, window_size, window_size, channel].
window_size (`int`):
window size.
padding_shape (`tuple`):
padded height and width (pad_height, pad_width).
original_shape (`tuple`):
original height and width (height, width) before padding.
Returns:
hidden_states: unpartitioned sequences with [batch_size, height, width, channel].
"""
pad_height, pad_width = padding_shape
height, width = original_shape
batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size)
hidden_states = windows.reshape(
batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1
)
hidden_states = (
hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1)
)
hidden_states = hidden_states[:, :height, :width, :].contiguous()
return hidden_states
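# Round-trip sketch (illustrative): for hidden_states of shape (2, 14, 14, 768) and window_size=8,
# window_partition pads to (16, 16) and returns (2 * 4, 8, 8, 768) windows together with
# padding_shape=(16, 16); window_unpartition with original_shape=(14, 14) then restores (2, 14, 14, 768).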
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.FloatTensor]:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
# Window partition
if self.window_size > 0:
height, width = hidden_states.shape[1], hidden_states.shape[2]
hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size)
hidden_states, attn_weights = self.attn(
hidden_states=hidden_states,
)
# Reverse window partition
if self.window_size > 0:
hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width))
hidden_states = residual + hidden_states
layernorm_output = self.layer_norm2(hidden_states)
hidden_states = hidden_states + self.mlp(layernorm_output)
return hidden_states
class SamVisionNeck(nn.Module):
def __init__(self, config: SamVisionConfig):
super().__init__()
self.config = config
self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False)
self.layer_norm1 = SamLayerNorm(config.output_channels, data_format="channels_first")
self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False)
self.layer_norm2 = SamLayerNorm(config.output_channels, data_format="channels_first")
def forward(self, hidden_states):
hidden_states = hidden_states.permute(0, 3, 1, 2)
hidden_states = self.conv1(hidden_states)
hidden_states = self.layer_norm1(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.layer_norm2(hidden_states)
return hidden_states
@auto_docstring
class SamPreTrainedModel(PreTrainedModel):
config: SamConfig
base_model_prefix = "sam"
main_input_name = "pixel_values"
_no_split_modules = ["SamVisionAttention"]
supports_gradient_checkpointing = True
_supports_sdpa = True
def _init_weights(self, module: nn.Module):
super()._init_weights(module)
if isinstance(module, SamVisionAttention):
if module.use_rel_pos:
module.rel_pos_h.data.zero_()
module.rel_pos_w.data.zero_()
elif isinstance(module, SamVisionEncoder):
if self.config.use_abs_pos:
module.pos_embed.data.zero_()
class SamVisionEncoder(SamPreTrainedModel):
_can_record_outputs = {"hidden_states": SamVisionLayer, "attentions": SamVisionAttention}
def __init__(self, config: SamVisionConfig):
super().__init__(config)
self.config = config
self.image_size = config.image_size
self.patch_embed = SamPatchEmbeddings(config)
self.pos_embed = None
if config.use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(
1,
config.image_size // config.patch_size,
config.image_size // config.patch_size,
config.hidden_size,
)
)
self.layers = nn.ModuleList()
for i in range(config.num_hidden_layers):
layer = SamVisionLayer(
config,
window_size=config.window_size if i not in config.global_attn_indexes else 0,
)
self.layers.append(layer)
self.neck = SamVisionNeck(config)
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.patch_embed
@check_model_inputs
def forward(
self, pixel_values: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs]
) -> SamVisionEncoderOutput:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.patch_embed(pixel_values)
if self.pos_embed is not None:
hidden_states = hidden_states + self.pos_embed
for layer_module in self.layers:
hidden_states = layer_module(hidden_states)
hidden_states = self.neck(hidden_states)
return SamVisionEncoderOutput(
last_hidden_state=hidden_states,
)
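# Shape note (illustrative, assuming the default SAM ViT configuration): pixel_values of shape
# (batch, 3, 1024, 1024) are patchified into a 64 x 64 grid, so the neck output `last_hidden_state` has shape
# (batch, output_channels, 64, 64), which is what the mask decoder consumes.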
@auto_docstring(
custom_intro="""
The vision model from Sam without any head or projection on top.
"""
)
class SamVisionModel(SamPreTrainedModel):
config: SamVisionConfig
main_input_name = "pixel_values"
def __init__(self, config: SamVisionConfig):
super().__init__(config)
self.vision_encoder = SamVisionEncoder(config)
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_encoder.patch_embed
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, SamVisionEncoderOutput]:
return self.vision_encoder(pixel_values, **kwargs)
@auto_docstring(
custom_intro="""
Segment Anything Model (SAM) for generating segmentation masks, given an input image and
input points and labels, boxes, or masks.
"""
)
class SamModel(SamPreTrainedModel):
_tied_weights_keys = ["prompt_encoder.shared_embedding.positional_embedding"]
# need to be ignored, as it's a buffer and will not be correctly detected as tied weight
_keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"]
_can_record_outputs = {"mask_decoder_attentions": OutputRecorder(SamTwoWayAttentionBlock, index=2)}
def __init__(self, config: SamConfig):
super().__init__(config)
self.shared_image_embedding = SamPositionalEmbedding(config.vision_config)
self.vision_encoder = SamVisionEncoder(config.vision_config)
self.prompt_encoder = SamPromptEncoder(config)
# The module using it is not a PreTrainedModel subclass so we need this
config.mask_decoder_config._attn_implementation = config._attn_implementation
self.mask_decoder = SamMaskDecoder(config.mask_decoder_config)
self.post_init()
def _tie_weights(self):
self.prompt_encoder.shared_embedding.positional_embedding.data = (
self.shared_image_embedding.positional_embedding.data
)
def get_input_embeddings(self):
return self.vision_encoder.get_input_embeddings()
def get_image_wide_positional_embeddings(self):
size = self.config.prompt_encoder_config.image_embedding_size
target_device = self.shared_image_embedding.positional_embedding.device
target_dtype = self.shared_image_embedding.positional_embedding.dtype
grid = torch.ones((size, size), device=target_device, dtype=target_dtype)
y_embed = grid.cumsum(dim=0) - 0.5
x_embed = grid.cumsum(dim=1) - 0.5
y_embed = y_embed / size
x_embed = x_embed / size
positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1))
return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width
@torch.no_grad()
def get_image_embeddings(self, pixel_values, **kwargs: Unpack[TransformersKwargs]):
r"""
Returns the image embeddings by passing the pixel values through the vision encoder.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Input pixel values
"""
vision_output = self.vision_encoder(
pixel_values,
**kwargs,
)
image_embeddings = vision_output[0]
return image_embeddings
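# Usage sketch (illustrative): precompute image embeddings once and reuse them for many prompts, e.g.
#   image_embeddings = model.get_image_embeddings(inputs["pixel_values"])
#   outputs = model(input_points=inputs["input_points"], image_embeddings=image_embeddings)
# which avoids re-running the vision encoder for every new point or box prompt.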
@torch.no_grad()
def get_prompt_embeddings(
self,
input_points: Optional[torch.FloatTensor] = None,
input_labels: Optional[torch.LongTensor] = None,
input_boxes: Optional[torch.FloatTensor] = None,
input_masks: Optional[torch.LongTensor] = None,
):
r"""
Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder.
Args:
input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):
Optional input points for the prompt encoder. The padding of the point is automatically done by the
processor. `point_batch_size` refers to the number of masks that we want the model to predict per
point. The model will output `point_batch_size` times 3 masks in total.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):
Optional input labels for the prompt encoder. The padding of the labels is automatically done by the
processor, or can be fed by the user.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`):
Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the
processor. Users can also pass the input boxes manually.
input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`):
Optional input masks for the prompt encoder.
"""
prompt_output = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
return prompt_output
@check_model_inputs
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
input_points: Optional[torch.FloatTensor] = None,
input_labels: Optional[torch.LongTensor] = None,
input_boxes: Optional[torch.FloatTensor] = None,
input_masks: Optional[torch.LongTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
multimask_output: bool = True,
attention_similarity: Optional[torch.FloatTensor] = None,
target_embedding: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> SamImageSegmentationOutput:
r"""
input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields much
better results. The points can be obtained by passing a list of list of list to the processor that will
create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
per input point), the third dimension is the number of points per segmentation mask (it is possible to pass
multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
coordinates of the point. If a different number of points is passed either for each image, or for each
mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
computation of the embedding will be skipped for these points using the labels.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
official implementation, there are 3 types of labels
- `1`: the point is a point that contains the object of interest
- `0`: the point is a point that does not contain the object of interest
- `-1`: the point corresponds to the background
We added the label:
- `-10`: the point is a padding point, thus should be ignored by the prompt encoder
The padding labels should be automatically done by the processor.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields
much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch
size, the number of boxes per image and the coordinates of the top left and bottom right point of the box.
In the order (`x1`, `y1`, `x2`, `y2`):
- `x1`: the x coordinate of the top left point of the input box
- `y1`: the y coordinate of the top left point of the input box
- `x2`: the x coordinate of the bottom right point of the input box
- `y2`: the y coordinate of the bottom right point of the input box
input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
generate a corresponding embedding, that will be fed later on to the mask decoder. These masks need to be
manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
multimask_output (`bool`, *optional*):
In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
"best" mask, by specifying `multimask_output=False`.
attention_similarity (`torch.FloatTensor`, *optional*):
Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
target_embedding (`torch.FloatTensor`, *optional*):
Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoModel, AutoProcessor
>>> model = AutoModel.from_pretrained("facebook/sam-vit-base")
>>> processor = AutoProcessor.from_pretrained("facebook/sam-vit-base")
>>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
>>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
>>> input_points = [[[400, 650]]] # 2D location of a window on the car
>>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt")
>>> # Get segmentation mask
>>> outputs = model(**inputs)
>>> # Postprocess masks
>>> masks = processor.post_process_masks(
... outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
... )
```
"""
if pixel_values is None and image_embeddings is None:
raise ValueError("Either pixel_values or image_embeddings must be provided.")
if pixel_values is not None and image_embeddings is not None:
raise ValueError("Only one of pixel_values and image_embeddings can be provided.")
if input_points is not None and len(input_points.shape) != 4:
raise ValueError(
"The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.",
f" got {input_points.shape}.",
)
if input_boxes is not None and len(input_boxes.shape) != 3:
raise ValueError(
"The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.",
f" got {input_boxes.shape}.",
)
if input_points is not None and input_boxes is not None:
point_batch_size = input_points.shape[1]
box_batch_size = input_boxes.shape[1]
if point_batch_size != box_batch_size:
raise ValueError(
f"You should provide as many bounding boxes as input points per box. Got {point_batch_size} and {box_batch_size}."
)
image_positional_embeddings = self.get_image_wide_positional_embeddings()
# repeat with batch size
batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0]
image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)
vision_attentions = None
vision_hidden_states = None
if pixel_values is not None:
vision_outputs: SamVisionEncoderOutput = self.vision_encoder(pixel_values, **kwargs)
image_embeddings = vision_outputs.last_hidden_state
vision_hidden_states = vision_outputs.hidden_states
vision_attentions = vision_outputs.attentions
if input_points is not None and input_labels is None:
input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)
if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]:
raise ValueError(
"The batch size of the image embeddings and the input points must be the same. ",
f"Got {image_embeddings.shape[0]} and {input_points.shape[0]} respectively.",
" if you want to pass multiple points for the same image, make sure that you passed ",
" input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ",
" input_labels of shape (batch_size, point_batch_size, num_points_per_image)",
)
sparse_embeddings, dense_embeddings = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
low_res_masks, iou_predictions = self.mask_decoder(
image_embeddings=image_embeddings,
image_positional_embeddings=image_positional_embeddings,
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
attention_similarity=attention_similarity,
target_embedding=target_embedding,
)
return SamImageSegmentationOutput(
iou_scores=iou_predictions,
pred_masks=low_res_masks,
vision_hidden_states=vision_hidden_states,
vision_attentions=vision_attentions,
)
__all__ = ["SamVisionModel", "SamModel", "SamPreTrainedModel"]
| transformers/src/transformers/models/sam/modeling_sam.py/0 | {
"file_path": "transformers/src/transformers/models/sam/modeling_sam.py",
"repo_id": "transformers",
"token_count": 26006
} | 534 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for SAM2."""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BatchFeature
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
SizeDict,
)
from ...utils import (
TensorType,
is_torch_available,
is_vision_available,
)
from ...utils.import_utils import requires
from ...video_processing_utils import BaseVideoProcessor
if is_torch_available():
import torch
from torch.nn import functional as F_t
if is_vision_available():
from ...image_utils import PILImageResampling
@requires(backends=("torchvision",))
class Sam2VideoVideoProcessor(BaseVideoProcessor):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 1024, "width": 1024}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
def _preprocess(
self,
videos: list["torch.Tensor"],
size: SizeDict,
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
original_sizes = [video.shape[-2:] for video in videos]
reshaped_input_sizes = [(size.height, size.width) for _ in range(len(videos))]
batch_feature = super()._preprocess(videos, size=size, return_tensors=return_tensors, **kwargs)
batch_feature = BatchFeature(
data={
"original_sizes": original_sizes,
"reshaped_input_sizes": reshaped_input_sizes,
**batch_feature.data,
},
tensor_type=return_tensors,
)
return batch_feature
def post_process_masks(
self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
mask_threshold (`float`, *optional*, defaults to 0.0):
The threshold to use for binarizing the masks.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
pad_size (`int`, *optional*, defaults to `self.pad_size`):
The target size the images were padded to before being passed to the model. If None, the target size is
assumed to be the processor's `pad_size`.
Returns:
(`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
pad_size = self.size if pad_size is None else pad_size
target_image_size = (pad_size["height"], pad_size["width"])
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)):
reshaped_input_sizes = reshaped_input_sizes.tolist()
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F_t.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
interpolated_mask = F_t.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
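# Usage sketch (illustrative, variable names are placeholders):
#   masks = video_processor.post_process_masks(
#       pred_masks, batch["original_sizes"], batch["reshaped_input_sizes"]
#   )
# returns per-frame boolean masks resized back to each original video frame size.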
__all__ = ["Sam2VideoVideoProcessor"]
| transformers/src/transformers/models/sam2_video/video_processing_sam2_video.py/0 | {
"file_path": "transformers/src/transformers/models/sam2_video/video_processing_sam2_video.py",
"repo_id": "transformers",
"token_count": 2110
} | 535 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SeamlessM4Tv2 model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class SeamlessM4Tv2Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`~SeamlessM4Tv2Model`]. It is used to instantiate
an SeamlessM4Tv2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SeamlessM4Tv2
[""](https://huggingface.co/"") architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256102):
Vocabulary size of the text modality of the SeamlessM4Tv2 model. Defines the number of different tokens
that can be represented by the `inputs_ids` passed when calling [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForTextToSpeech`] or [`~SeamlessM4Tv2ForTextToText`].
t2u_vocab_size (`int`, *optional*, defaults to 10082):
Unit vocabulary size of the SeamlessM4Tv2 model. Defines the number of different "unit tokens" that can be
represented by the `inputs_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
char_vocab_size (`int`, *optional*, defaults to 10943):
Character vocabulary size of the SeamlessM4Tv2 model. Defines the number of different character tokens that
can be represented by the `char_inputs_ids` passed when calling the Text-To-Units sub-model of
[`~SeamlessM4Tv2Model`], [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
> Parameters shared across sub-models
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" layers in the architecture.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model text encoder and decoder might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
encoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the encoders. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the decoders. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
`"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all attention layers.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all activation layers in the model.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by dividing by sqrt(d_model).
> Text encoder and text decoder specific parameters
encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text encoder.
decoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text decoder.
decoder_start_token_id (`int`, *optional*, defaults to 3):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied in the text decoder.
max_new_tokens (`int`, *optional*, defaults to 256):
The maximum number of text tokens to generate, ignoring the number of tokens in the prompt.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. Only applied to the text-decoder model.
bos_token_id (`int`, *optional*, defaults to 2):
The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
eos_token_id (`int`, *optional*, defaults to 3):
The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
> Speech encoder specific parameters
speech_encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer speech encoder.
speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer speech encoder.
speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
`"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all layers in the speech encoder.
add_adapter (`bool`, *optional*, defaults to `True`):
Add an adapter layer on top of the speech encoder.
speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability for the speech encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
feature_projection_input_dim (`int`, *optional*, defaults to 160):
Input dimension of the input feature projection of the speech encoder, i.e. the dimension obtained after
processing input audio with [`SeamlessM4TFeatureExtractor`].
adaptor_kernel_size (`int`, *optional*, defaults to 8):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_stride (`int`, *optional*, defaults to 8):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all layers in the speech adapter.
num_adapter_layers (`int`, *optional*, defaults to 1):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`):
Can be set to `relative_key`. If left as `None`, no relative position embedding is applied. Only
applied to the speech encoder. For more information on `"relative_key"`, please refer to [Self-Attention
with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
left_max_position_embeddings (`int`, *optional*, defaults to 64):
The left clipping value for relative positions.
right_max_position_embeddings (`int`, *optional*, defaults to 8):
The right clipping value for relative positions.
speech_encoder_chunk_size (`int`, *optional*, defaults to 20000):
The size of each attention chunk.
speech_encoder_left_chunk_num (`int`, *optional*, defaults to 128):
Number of preceding (left) chunks that each chunk is allowed to attend to.
> Text-To-Unit (t2u) model specific parameters
t2u_bos_token_id (`int`, *optional*, defaults to 0):
The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_pad_token_id (`int`, *optional*, defaults to 1):
The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_eos_token_id (`int`, *optional*, defaults to 2):
The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_encoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit encoder.
t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
t2u_decoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit decoder.
t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
t2u_max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model's text-to-unit component might ever be used with. Typically
set this to something large just in case (e.g., 512, 1024 or 2048).
t2u_variance_predictor_embed_dim (`int`, *optional*, defaults to 1024):
The projection dimension of the text-to-unit's duration predictor.
t2u_variance_predictor_hidden_dim (`int`, *optional*, defaults to 256):
Internal dimension of the text-to-unit's duration predictor.
t2u_variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the convolutional layers of the text-to-unit's duration predictor.
t2u_variance_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the text-to-unit's duration predictor.
> Hifi-Gan Vocoder specific parameters
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
*upsample_kernel_sizes*. Applies to the vocoder only.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
the length of *upsample_rates*. Applies to the vocoder only.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
field fusion (MRF) module. Applies to the vocoder only.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
only.
unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
Vocabulary size of the SeamlessM4Tv2 vocoder. Defines the number of different unit tokens that can be
represented by the `input_ids` passed when calling the vocoder of [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
unit_embed_dim (`int`, *optional*, defaults to 1280):
The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
lang_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
spkr_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
vocoder_num_langs (`int`, *optional*, defaults to 36):
Number of languages supported by the vocoder. Might be different from `t2u_num_langs`.
vocoder_num_spkrs (`int`, *optional*, defaults to 200):
Number of speakers supported by the vocoder.
variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the duration predictor. Applies to the vocoder only.
var_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the duration predictor. Applies to the vocoder only.
vocoder_offset (`int`, *optional*, defaults to 4):
Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
```python
>>> from transformers import SeamlessM4Tv2Model, SeamlessM4Tv2Config
>>> # Initializing a SeamlessM4Tv2 configuration
>>> configuration = SeamlessM4Tv2Config()
>>> # Initializing a model from that configuration
>>> model = SeamlessM4Tv2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "seamless_m4t_v2"
def __init__(
self,
vocab_size=256102,
t2u_vocab_size=10082,
char_vocab_size=10943,
# shared config
hidden_size=1024,
initializer_range=0.02,
layer_norm_eps=1e-5,
use_cache=True,
max_position_embeddings=4096,
is_encoder_decoder=True,
encoder_layerdrop=0.05,
decoder_layerdrop=0.05,
activation_function="relu",
dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.0,
scale_embedding=True,
# text encoder|decoder
encoder_layers=24,
encoder_ffn_dim=8192,
encoder_attention_heads=16,
decoder_layers=24,
decoder_ffn_dim=8192,
decoder_attention_heads=16,
decoder_start_token_id=3,
max_new_tokens=256,
pad_token_id=0,
bos_token_id=2,
eos_token_id=3,
# speech_encoder
speech_encoder_layers=24,
speech_encoder_attention_heads=16,
speech_encoder_intermediate_size=4096,
speech_encoder_hidden_act="swish",
speech_encoder_dropout=0.0,
add_adapter=True,
speech_encoder_layerdrop=0.1,
feature_projection_input_dim=160,
adaptor_kernel_size=8,
adaptor_stride=8,
adaptor_dropout=0.1,
num_adapter_layers=1,
position_embeddings_type="relative_key",
conv_depthwise_kernel_size=31,
left_max_position_embeddings=64,
right_max_position_embeddings=8,
speech_encoder_chunk_size=20000,
speech_encoder_left_chunk_num=128,
# t2u config
t2u_bos_token_id=0,
t2u_pad_token_id=1,
t2u_eos_token_id=2,
t2u_encoder_layers=6,
t2u_encoder_ffn_dim=8192,
t2u_encoder_attention_heads=16,
t2u_decoder_layers=6,
t2u_decoder_ffn_dim=8192,
t2u_decoder_attention_heads=16,
t2u_max_position_embeddings=4096,
t2u_variance_predictor_embed_dim=1024,
t2u_variance_predictor_hidden_dim=256,
t2u_variance_predictor_kernel_size=3,
t2u_variance_pred_dropout=0.5,
# hifi-gan vocoder config
sampling_rate=16000,
upsample_initial_channel=512,
upsample_rates=[5, 4, 4, 2, 2],
upsample_kernel_sizes=[11, 8, 8, 4, 4],
resblock_kernel_sizes=[3, 7, 11],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
leaky_relu_slope=0.1,
# specific to Code Hifi-Gan
unit_hifi_gan_vocab_size=10000,
unit_embed_dim=1280,
lang_embed_dim=256,
spkr_embed_dim=256,
vocoder_num_langs=36,
vocoder_num_spkrs=200,
variance_predictor_kernel_size=3,
var_pred_dropout=0.5,
vocoder_offset=4,
**kwargs,
):
# overall_config
self.vocab_size = vocab_size
self.t2u_vocab_size = t2u_vocab_size
self.char_vocab_size = char_vocab_size
self.hidden_size = hidden_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.max_position_embeddings = max_position_embeddings
self.use_cache = use_cache
self.max_new_tokens = max_new_tokens
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.scale_embedding = scale_embedding
# for proper config init
self.num_attention_heads = decoder_attention_heads
self.num_hidden_layers = decoder_layers
# text|unit encoder|decoder
self.encoder_layers = encoder_layers
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_attention_heads = decoder_attention_heads
# speech_encoder
self.speech_encoder_layers = speech_encoder_layers
self.speech_encoder_hidden_act = speech_encoder_hidden_act
self.speech_encoder_dropout = speech_encoder_dropout
self.speech_encoder_attention_heads = speech_encoder_attention_heads
self.speech_encoder_layerdrop = speech_encoder_layerdrop
self.speech_encoder_intermediate_size = speech_encoder_intermediate_size
self.feature_projection_input_dim = feature_projection_input_dim
self.adaptor_kernel_size = adaptor_kernel_size
self.adaptor_stride = adaptor_stride
self.adaptor_dropout = adaptor_dropout
self.num_adapter_layers = num_adapter_layers
self.position_embeddings_type = position_embeddings_type
self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
self.add_adapter = add_adapter
self.left_max_position_embeddings = left_max_position_embeddings
self.right_max_position_embeddings = right_max_position_embeddings
self.speech_encoder_chunk_size = speech_encoder_chunk_size
self.speech_encoder_left_chunk_num = speech_encoder_left_chunk_num
# t2u config
self.t2u_bos_token_id = t2u_bos_token_id
self.t2u_pad_token_id = t2u_pad_token_id
self.t2u_eos_token_id = t2u_eos_token_id
self.t2u_encoder_layers = t2u_encoder_layers
self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim
self.t2u_encoder_attention_heads = t2u_encoder_attention_heads
self.t2u_decoder_layers = t2u_decoder_layers
self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim
self.t2u_decoder_attention_heads = t2u_decoder_attention_heads
self.t2u_max_position_embeddings = t2u_max_position_embeddings
self.t2u_variance_predictor_embed_dim = t2u_variance_predictor_embed_dim
self.t2u_variance_predictor_hidden_dim = t2u_variance_predictor_hidden_dim
self.t2u_variance_predictor_kernel_size = t2u_variance_predictor_kernel_size
self.t2u_variance_pred_dropout = t2u_variance_pred_dropout
# hifi-gan vocoder config
# original parameters specific to Hifi-Gan
self.sampling_rate = sampling_rate
self.upsample_initial_channel = upsample_initial_channel
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.leaky_relu_slope = leaky_relu_slope
# specific to Code Hifi-Gan
self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size
self.unit_embed_dim = unit_embed_dim
self.lang_embed_dim = lang_embed_dim
self.spkr_embed_dim = spkr_embed_dim
self.vocoder_num_langs = vocoder_num_langs
self.vocoder_num_spkrs = vocoder_num_spkrs
self.variance_predictor_kernel_size = variance_predictor_kernel_size
self.var_pred_dropout = var_pred_dropout
self.vocoder_offset = vocoder_offset
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
is_encoder_decoder=is_encoder_decoder,
max_position_embeddings=max_position_embeddings,
**kwargs,
)
__all__ = ["SeamlessM4Tv2Config"]
| transformers/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py/0 | {
"file_path": "transformers/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py",
"repo_id": "transformers",
"token_count": 9887
} | 536 |
# coding=utf-8
# Copyright 2025 Google Inc. and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class ShieldGemma2Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ShieldGemma2ForImageClassification`]. It is used to instantiate a
ShieldGemma2ForImageClassification model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a configuration similar to that of shieldgemma-2-4b-it.
e.g. [google/gemma-3-4b](https://huggingface.co/google/gemma-3-4b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`Union[ShieldGemma2TextConfig, dict]`, *optional*):
The config object of the text backbone.
vision_config (`Union[AutoConfig, dict]`, *optional*):
Custom vision config or dict.
mm_tokens_per_image (`int`, *optional*, defaults to 256):
The number of tokens per image embedding.
boi_token_index (`int`, *optional*, defaults to 255999):
The begin-of-image token index to wrap the image prompt.
eoi_token_index (`int`, *optional*, defaults to 256000):
The end-of-image token index to wrap the image prompt.
image_token_index (`int`, *optional*, defaults to 262144):
The image token index to encode the image prompt.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import ShieldGemma2ForConditionalGeneration, ShieldGemma2Config, SiglipVisionConfig, ShieldGemma2TextConfig
>>> # Initializing a Siglip-like vision config
>>> vision_config = SiglipVisionConfig()
>>> # Initializing a ShieldGemma2 Text config
>>> text_config = ShieldGemma2TextConfig()
>>> # Initializing a ShieldGemma2 gemma-3-4b style configuration
>>> configuration = ShieldGemma2Config(vision_config, text_config)
>>> # Initializing a model from the gemma-3-4b style configuration
>>> model = ShieldGemma2ForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "shieldgemma2"
attribute_map = {
"image_token_id": "image_token_index",
"boi_token_id": "boi_token_index",
"eoi_token_id": "eoi_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
text_config=None,
vision_config=None,
mm_tokens_per_image: int = 256,
boi_token_index: int = 255_999,
eoi_token_index: int = 256_000,
image_token_index: int = 262_144,
initializer_range: float = 0.02,
**kwargs,
):
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip_vision_model"]()
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "gemma3_text")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["gemma3_text"]()
self.text_config = text_config
self.mm_tokens_per_image = mm_tokens_per_image
self.boi_token_index = boi_token_index
self.eoi_token_index = eoi_token_index
self.image_token_index = image_token_index
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["ShieldGemma2Config"]
| transformers/src/transformers/models/shieldgemma2/configuration_shieldgemma2.py/0 | {
"file_path": "transformers/src/transformers/models/shieldgemma2/configuration_shieldgemma2.py",
"repo_id": "transformers",
"token_count": 1763
} | 537 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for SigLIP2."""
from typing import Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
SizeDict,
)
from ...image_utils import (
ImageInput,
PILImageResampling,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
logging,
)
from .image_processing_siglip2 import get_image_size_for_max_num_patches
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
logger = logging.get_logger(__name__)
def convert_image_to_patches(image: "torch.Tensor", patch_size: int) -> "torch.Tensor":
"""
Convert 3D tensor image of shape (num_channels, image_height, image_width) into 2D tensor of patches of shape
(num_patches_height * num_patches_width, patch_size * patch_size * num_channels).
"""
num_channels, image_height, image_width = image.shape
num_patches_height = image_height // patch_size
num_patches_width = image_width // patch_size
patched_image = image.reshape(num_channels, num_patches_height, patch_size, num_patches_width, patch_size)
patched_image = patched_image.permute(1, 3, 2, 4, 0)
patched_image = patched_image.reshape(num_patches_height * num_patches_width, -1)
return patched_image
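# Illustrative shape check for `convert_image_to_patches` (values assumed, not part of the module's API): a
# 3 x 32 x 32 image with `patch_size=16` splits into a 2 x 2 grid of patches, so the result has shape
# (2 * 2, 16 * 16 * 3) == (4, 768):
#
#   patches = convert_image_to_patches(torch.rand(3, 32, 32), patch_size=16)
#   assert patches.shape == (4, 768)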
def pad_along_first_dim(
tensor: "torch.Tensor", target_length: int, pad_value: int = 0
) -> tuple["torch.Tensor", "torch.Tensor"]:
"""
Pad the tensor along the first dimension.
"""
current_length = tensor.shape[0]
padding_length = target_length - current_length
mask = torch.ones((target_length,), dtype=torch.int32)
if padding_length > 0:
padding = [0, 0] * (tensor.ndim - 1) + [0, padding_length]
tensor = torch.nn.functional.pad(tensor, padding, mode="constant", value=pad_value)
mask[-padding_length:] = 0
return tensor, mask
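# Illustrative example for `pad_along_first_dim` (values assumed): padding a (4, 768) patch tensor to
# `target_length=256` yields a (256, 768) tensor and a length-256 mask whose first 4 entries are 1 and whose
# remaining entries are 0:
#
#   padded, mask = pad_along_first_dim(torch.rand(4, 768), target_length=256)
#   assert padded.shape == (256, 768) and mask.sum() == 4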
class Siglip2FastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
"""
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch the image will be split to.
max_num_patches (`int`, *optional*, defaults to 256):
The image will be resized to have at most this number of patches,
and then padded in "patch" dimension to match this number exactly.
"""
patch_size: Optional[int]
max_num_patches: Optional[int]
@auto_docstring
class Siglip2ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = [0.5, 0.5, 0.5]
image_std = [0.5, 0.5, 0.5]
do_resize = True
do_rescale = True
do_normalize = True
patch_size = 16
max_num_patches = 256
valid_kwargs = Siglip2FastImageProcessorKwargs
unused_kwargs = ["size", "do_center_crop", "crop_size"]
def __init__(self, **kwargs: Unpack[Siglip2FastImageProcessorKwargs]):
super().__init__(**kwargs)
def _validate_preprocess_kwargs(self, **kwargs) -> tuple:
# Remove do_resize from kwargs to not raise an error as size is None
kwargs.pop("do_resize", None)
return super()._validate_preprocess_kwargs(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[Siglip2FastImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
patch_size: int,
max_num_patches: int,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
pixel_masks = []
pixel_values = []
spatial_shapes = []
for image in images:
if do_resize:
height, width = get_image_size_for_max_num_patches(
image_height=image.shape[1],
image_width=image.shape[2],
patch_size=patch_size,
max_num_patches=max_num_patches,
)
side_dict = SizeDict(height=height, width=width)
image = self.resize(image=image, size=side_dict, interpolation=interpolation)
image = self.rescale_and_normalize(image, do_rescale, rescale_factor, do_normalize, image_mean, image_std)
# (num_channels, height, width) -> (num_patches, patch_size * patch_size * num_channels)
patches = convert_image_to_patches(image, patch_size)
patches, mask = pad_along_first_dim(patches, max_num_patches)
num_patches_height = image.shape[1] // patch_size
num_patches_width = image.shape[2] // patch_size
spatial_shapes.append((num_patches_height, num_patches_width))
pixel_values.append(patches)
pixel_masks.append(mask)
pixel_values = torch.stack(pixel_values)
pixel_masks = torch.stack(pixel_masks)
spatial_shapes = torch.tensor(spatial_shapes)
batch_feature = BatchFeature(
data={
"pixel_values": pixel_values,
"pixel_attention_mask": pixel_masks,
"spatial_shapes": spatial_shapes,
},
tensor_type=return_tensors,
)
return batch_feature
__all__ = ["Siglip2ImageProcessorFast"]
| transformers/src/transformers/models/siglip2/image_processing_siglip2_fast.py/0 | {
"file_path": "transformers/src/transformers/models/siglip2/image_processing_siglip2_fast.py",
"repo_id": "transformers",
"token_count": 2651
} | 538 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert SpeechT5 HiFi-GAN checkpoint."""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
hf_model.apply_weight_norm()
hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates)):
hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
for j in range(len(config.resblock_dilation_sizes)):
hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
checkpoint_path,
stats_path,
pytorch_dump_folder_path,
config_path=None,
repo_id=None,
):
if config_path is not None:
config = SpeechT5HifiGanConfig.from_pretrained(config_path)
else:
config = SpeechT5HifiGanConfig()
model = SpeechT5HifiGan(config)
orig_checkpoint = torch.load(checkpoint_path, weights_only=True)
load_weights(orig_checkpoint["model"]["generator"], model, config)
stats = np.load(stats_path)
mean = stats[0].reshape(-1)
scale = stats[1].reshape(-1)
model.mean = torch.from_numpy(mean).float()
model.scale = torch.from_numpy(scale).float()
model.save_pretrained(pytorch_dump_folder_path)
if repo_id:
print("Pushing to the hub...")
model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
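# Example invocation (all paths below are placeholders, not files shipped with the repository):
#
#   python convert_hifigan.py \
#       --checkpoint_path /path/to/original/hifigan_checkpoint.pt \
#       --stats_path /path/to/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan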
| transformers/src/transformers/models/speecht5/convert_hifigan.py/0 | {
"file_path": "transformers/src/transformers/models/speecht5/convert_hifigan.py",
"repo_id": "transformers",
"token_count": 1712
} | 539 |
# coding=utf-8
# Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for SqueezeBERT."""
import json
from typing import Optional
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with Bert->SqueezeBert,BERT->SqueezeBERT
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original SqueezeBERT).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = SqueezeBertTokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs,
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase", do_lower_case) != do_lower_case
or normalizer_state.get("strip_accents", strip_accents) != strip_accents
or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
normalizer_state["lowercase"] = do_lower_case
normalizer_state["strip_accents"] = strip_accents
normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A SqueezeBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
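# Illustrative sketch (hypothetical token ids, not the actual vocabulary): with `cls_token_id=0`, `sep_token_id=2`
# and `token_ids_0=[5, 6]`, `build_inputs_with_special_tokens` returns `[0, 5, 6, 2]`; adding `token_ids_1=[7]`
# extends it to `[0, 5, 6, 2, 7, 2]`, matching the `[CLS] A [SEP] B [SEP]` pattern described above.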
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
__all__ = ["SqueezeBertTokenizerFast"]
| transformers/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py/0 | {
"file_path": "transformers/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py",
"repo_id": "transformers",
"token_count": 2605
} | 540 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for SuperPoint."""
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
from ... import is_torch_available, is_vision_available
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
is_scaled_image,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging, requires_backends
if is_torch_available():
import torch
if TYPE_CHECKING:
from .modeling_superpoint import SuperPointKeypointDescriptionOutput
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def is_grayscale(
image: np.ndarray,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
if input_data_format == ChannelDimension.FIRST:
if image.shape[0] == 1:
return True
return np.all(image[0, ...] == image[1, ...]) and np.all(image[1, ...] == image[2, ...])
elif input_data_format == ChannelDimension.LAST:
if image.shape[-1] == 1:
return True
return np.all(image[..., 0] == image[..., 1]) and np.all(image[..., 1] == image[..., 2])
def convert_to_grayscale(
image: ImageInput,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> ImageInput:
"""
Converts an image to grayscale format using the NTSC formula. Only numpy arrays and PIL images are supported.
TODO: support torch and tensorflow grayscale conversion.
This function is supposed to return a 1-channel image, but it returns a 3-channel image with the same value in each
channel, because of an issue that is discussed in:
https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
Args:
image (Image):
The image to convert.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image.
"""
requires_backends(convert_to_grayscale, ["vision"])
if isinstance(image, np.ndarray):
if is_grayscale(image, input_data_format=input_data_format):
return image
if input_data_format == ChannelDimension.FIRST:
gray_image = image[0, ...] * 0.2989 + image[1, ...] * 0.5870 + image[2, ...] * 0.1140
gray_image = np.stack([gray_image] * 3, axis=0)
elif input_data_format == ChannelDimension.LAST:
gray_image = image[..., 0] * 0.2989 + image[..., 1] * 0.5870 + image[..., 2] * 0.1140
gray_image = np.stack([gray_image] * 3, axis=-1)
return gray_image
if not isinstance(image, PIL.Image.Image):
return image
image = image.convert("L")
return image
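# Illustrative arithmetic for the NTSC weighting used above: a pure white pixel (1.0, 1.0, 1.0) maps to
# 0.2989 + 0.5870 + 0.1140 = 0.9999 (the weights sum to just under 1), and the resulting single value is
# repeated across the three output channels, as noted in the docstring.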
class SuperPointImageProcessor(BaseImageProcessor):
r"""
Constructs a SuperPoint image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
`True`. Can be overridden by `size` in the `preprocess` method.
resample (`Resampling`, *optional*, defaults to `2`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_grayscale (`bool`, *optional*, defaults to `False`):
Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: float = 1 / 255,
do_grayscale: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 480, "width": 640}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_grayscale = do_grayscale
def resize(
self,
image: np.ndarray,
size: dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
):
"""
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be inferred from the input
image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
size = get_size_dict(size, default_to_square=False)
return resize(
image,
size=(size["height"], size["width"]),
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def preprocess(
self,
images,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_grayscale: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after `resize` has been applied, expected as a dictionary of the form
`{"height": int, "width": int}`. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
Whether to convert the image to grayscale.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_grayscale = do_grayscale if do_grayscale is not None else self.do_grayscale
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_grayscale:
images = [convert_to_grayscale(image, input_data_format=input_data_format) for image in images]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_keypoint_detection(
self, outputs: "SuperPointKeypointDescriptionOutput", target_sizes: Union[TensorType, list[tuple]]
) -> list[dict[str, "torch.Tensor"]]:
"""
Converts the raw output of [`SuperPointForKeypointDetection`] into lists of keypoints, scores and descriptors
with coordinates absolute to the original image sizes.
Args:
outputs ([`SuperPointKeypointDescriptionOutput`]):
Raw outputs of the model containing keypoints in a relative (x, y) format, with scores and descriptors.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. This must be the original
image size (before any processing).
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the keypoints in absolute format according
to target_sizes, scores and descriptors for an image in the batch as predicted by the model.
"""
if len(outputs.mask) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the mask")
if isinstance(target_sizes, list):
image_sizes = torch.tensor(target_sizes, device=outputs.mask.device)
else:
if target_sizes.shape[1] != 2:
raise ValueError(
"Each element of target_sizes must contain the size (h, w) of each image of the batch"
)
image_sizes = target_sizes
# Flip the image sizes to (width, height) and convert keypoints to absolute coordinates
image_sizes = torch.flip(image_sizes, [1])
masked_keypoints = outputs.keypoints * image_sizes[:, None]
# Convert masked_keypoints to int
masked_keypoints = masked_keypoints.to(torch.int32)
results = []
for image_mask, keypoints, scores, descriptors in zip(
outputs.mask, masked_keypoints, outputs.scores, outputs.descriptors
):
indices = torch.nonzero(image_mask).squeeze(1)
keypoints = keypoints[indices]
scores = scores[indices]
descriptors = descriptors[indices]
results.append({"keypoints": keypoints, "scores": scores, "descriptors": descriptors})
return results
__all__ = ["SuperPointImageProcessor"]
| transformers/src/transformers/models/superpoint/image_processing_superpoint.py/0 | {
"file_path": "transformers/src/transformers/models/superpoint/image_processing_superpoint.py",
"repo_id": "transformers",
"token_count": 6777
} | 541 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Swin2SR checkpoints from the original repository. URL: https://github.com/mv-lab/swin2sr"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
config = Swin2SRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
config.upscale = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
config.upscale = 4
config.image_size = 48
config.upsampler = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
config.depths = [6, 6, 6, 6]
config.embed_dim = 60
config.num_heads = [6, 6, 6, 6]
config.upsampler = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
config.upscale = 4
config.upsampler = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
config.num_channels = 1
config.upscale = 1
config.image_size = 126
config.window_size = 7
config.img_range = 255.0
config.upsampler = ""
return config
def rename_key(name, config):
if "patch_embed.proj" in name and "layers" not in name:
name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
if "patch_embed.norm" in name:
name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
if "layers" in name:
name = name.replace("layers", "encoder.stages")
if "residual_group.blocks" in name:
name = name.replace("residual_group.blocks", "layers")
if "attn.proj" in name:
name = name.replace("attn.proj", "attention.output.dense")
if "attn" in name:
name = name.replace("attn", "attention.self")
if "norm1" in name:
name = name.replace("norm1", "layernorm_before")
if "norm2" in name:
name = name.replace("norm2", "layernorm_after")
if "mlp.fc1" in name:
name = name.replace("mlp.fc1", "intermediate.dense")
if "mlp.fc2" in name:
name = name.replace("mlp.fc2", "output.dense")
if "q_bias" in name:
name = name.replace("q_bias", "query.bias")
if "k_bias" in name:
name = name.replace("k_bias", "key.bias")
if "v_bias" in name:
name = name.replace("v_bias", "value.bias")
if "cpb_mlp" in name:
name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
if "patch_embed.proj" in name:
name = name.replace("patch_embed.proj", "patch_embed.projection")
if name == "norm.weight":
name = "layernorm.weight"
if name == "norm.bias":
name = "layernorm.bias"
if "conv_first" in name:
name = name.replace("conv_first", "first_convolution")
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
name = name.replace("conv_last", "final_convolution")
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
name = name.replace("conv_before_upsample.0", "conv_before_upsample")
if "upsample.0" in name:
name = name.replace("upsample.0", "upsample.convolution_0")
if "upsample.2" in name:
name = name.replace("upsample.2", "upsample.convolution_1")
name = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
name = name.replace("upsample.0.weight", "upsample.conv.weight")
name = name.replace("upsample.0.bias", "upsample.conv.bias")
else:
pass
else:
name = "swin2sr." + name
return name
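# Illustrative mapping produced by `rename_key` (derived from the rules above): the original checkpoint key
# "layers.0.residual_group.blocks.1.attn.proj.weight" becomes
# "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight".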
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy():
val = orig_state_dict.pop(key)
if "qkv" in key:
key_split = key.split(".")
stage_num = int(key_split[1])
block_num = int(key_split[4])
dim = config.embed_dim
if "weight" in key:
orig_state_dict[
f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
] = val[:dim, :]
orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"] = (
val[dim : dim * 2, :]
)
orig_state_dict[
f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
] = val[-dim:, :]
else:
orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"] = (
val[:dim]
)
orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"] = (
val[dim : dim * 2]
)
orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"] = (
val[-dim:]
)
else:
orig_state_dict[rename_key(key, config)] = val
return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
config = get_config(checkpoint_url)
model = Swin2SRForImageSuperResolution(config)
model.eval()
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
new_state_dict = convert_state_dict(state_dict, config)
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
if len(missing_keys) > 0:
raise ValueError(f"Missing keys when converting: {missing_keys}")
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict")
# verify values
url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
processor = Swin2SRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
image_size = 126 if "Jpeg" in checkpoint_url else 256
transforms = Compose(
[
Resize((image_size, image_size)),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
pixel_values = transforms(image).unsqueeze(0)
if config.num_channels == 1:
pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 512, 512])
expected_slice = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
)
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
)
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
)
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 512, 512])
expected_slice = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
)
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
)
assert outputs.reconstruction.shape == expected_shape, (
f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
)
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
print("Looks ok!")
url_to_name = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}")
processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
args = parser.parse_args()
convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
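    # Example invocation (sketch; the output folder name is illustrative, the checkpoint URL
    # is the default defined above):
    #   python convert_swin2sr_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
    #       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64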
| transformers/src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py",
"repo_id": "transformers",
"token_count": 5321
} | 542 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert T5X checkpoints from the original repository to JAX/FLAX model."""
import argparse
from t5x import checkpoints
from transformers import FlaxT5ForConditionalGeneration, T5Config
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
config = T5Config.from_pretrained(config_name)
flax_model = FlaxT5ForConditionalGeneration(config=config)
t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]
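    # T5 v1.1 checkpoints use a gated feed-forward block with two input projections
    # ("wi_0" and "wi_1"), while the original T5 uses a single "wi"; detect which layout
    # this checkpoint uses so the matching parameters are copied below.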
# Encoder
for layer_index in range(config.num_layers):
layer_name = f"layers_{str(layer_index)}"
# Self-Attention
t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Layer Normalization
t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["k"]["kernel"] = (
t5x_attention_key
)
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["o"]["kernel"] = (
t5x_attention_out
)
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["q"]["kernel"] = (
t5x_attention_query
)
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["v"]["kernel"] = (
t5x_attention_value
)
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["layer_norm"]["weight"] = (
t5x_attention_layer_norm
)
if split_mlp_wi:
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["DenseReluDense"]["wi_0"][
"kernel"
] = t5x_mlp_wi_0
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["DenseReluDense"]["wi_1"][
"kernel"
] = t5x_mlp_wi_1
else:
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["DenseReluDense"]["wi"]["kernel"] = (
t5x_mlp_wi
)
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["DenseReluDense"]["wo"]["kernel"] = (
t5x_mlp_wo
)
flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["layer_norm"]["weight"] = (
t5x_mlp_layer_norm
)
# Only for layer 0:
t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
flax_model.params["encoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
"embedding"
] = t5x_encoder_rel_embedding
# Assigning
t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
# Decoder
for layer_index in range(config.num_decoder_layers):
layer_name = f"layers_{str(layer_index)}"
# Self-Attention
t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
t5x_enc_dec_attention_key = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]["key"][
"kernel"
]
t5x_enc_dec_attention_out = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]["out"][
"kernel"
]
t5x_enc_dec_attention_query = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]["query"][
"kernel"
]
t5x_enc_dec_attention_value = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]["value"][
"kernel"
]
# Layer Normalization
t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["k"]["kernel"] = (
t5x_attention_key
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["o"]["kernel"] = (
t5x_attention_out
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["q"]["kernel"] = (
t5x_attention_query
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["v"]["kernel"] = (
t5x_attention_value
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["layer_norm"]["weight"] = (
t5x_pre_attention_layer_norm
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["EncDecAttention"]["k"]["kernel"] = (
t5x_enc_dec_attention_key
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["EncDecAttention"]["o"]["kernel"] = (
t5x_enc_dec_attention_out
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["EncDecAttention"]["q"]["kernel"] = (
t5x_enc_dec_attention_query
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["EncDecAttention"]["v"]["kernel"] = (
t5x_enc_dec_attention_value
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["layer_norm"]["weight"] = (
t5x_cross_layer_norm
)
if split_mlp_wi:
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["DenseReluDense"]["wi_0"][
"kernel"
] = t5x_mlp_wi_0
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["DenseReluDense"]["wi_1"][
"kernel"
] = t5x_mlp_wi_1
else:
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["DenseReluDense"]["wi"]["kernel"] = (
t5x_mlp_wi
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["DenseReluDense"]["wo"]["kernel"] = (
t5x_mlp_wo
)
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["layer_norm"]["weight"] = (
            t5x_mlp_layer_norm
)
# Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm
# Only for layer 0:
t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
"embedding"
] = t5x_decoder_rel_embedding
# Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings
# LM Head (only in v1.1 checkpoints)
if "logits_dense" in t5x_model["target"]["decoder"]:
flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(flax_dump_folder_path)
print("T5X Model was successfully converted!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the TX5 checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
args = parser.parse_args()
convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
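    # Example invocation (sketch; paths and config name are illustrative):
    #   python convert_t5x_checkpoint_to_flax.py \
    #       --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --config_name google/t5-v1_1-base \
    #       --flax_dump_folder_path ./t5-v1_1-base-flax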
| transformers/src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py/0 | {
"file_path": "transformers/src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py",
"repo_id": "transformers",
"token_count": 5125
} | 543 |
# coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Table Transformer model."""
import math
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import Tensor, nn
from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import (
ModelOutput,
auto_docstring,
is_timm_available,
logging,
requires_backends,
)
from ...utils.backbone_utils import load_backbone
from .configuration_table_transformer import TableTransformerConfig
if is_timm_available():
from timm import create_model
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions,
namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them
gone through a layernorm. This is useful when training the model with auxiliary decoding losses.
"""
)
# Copied from transformers.models.detr.modeling_detr.DetrDecoderOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions):
r"""
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
"""
intermediate_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the TABLE_TRANSFORMER encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput,
namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them
gone through a layernorm. This is useful when training the model with auxiliary decoding losses.
"""
)
# Copied from transformers.models.detr.modeling_detr.DetrModelOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
class TableTransformerModelOutput(Seq2SeqModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
"""
intermediate_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`TableTransformerForObjectDetection`].
"""
)
# Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->TableTransformer,DetrImageProcessor->DetrImageProcessor
class TableTransformerObjectDetectionOutput(ModelOutput):
r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~TableTransformerImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->TableTransformer
class TableTransformerFrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which models other than
    torchvision.models.resnet[18,34,50,101] produce NaNs.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it user-friendly
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
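        # Fold the usual BatchNorm equation (x - running_mean) / sqrt(running_var + eps) * weight + bias
        # into a single affine transform x * scale + bias; since the statistics are frozen this is
        # numerically equivalent and slightly cheaper.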
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->TableTransformer
def replace_batch_norm(model):
r"""
Recursively replace all `torch.nn.BatchNorm2d` with `TableTransformerFrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
"""
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = TableTransformerFrozenBatchNorm2d(module.num_features)
if module.weight.device != torch.device("meta"):
new_module.weight.data.copy_(module.weight)
new_module.bias.data.copy_(module.bias)
new_module.running_mean.data.copy_(module.running_mean)
new_module.running_var.data.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
# Copied from transformers.models.detr.modeling_detr.DetrConvEncoder with Detr->TableTransformer
class TableTransformerConvEncoder(nn.Module):
"""
Convolutional backbone, using either the AutoBackbone API or one from the timm library.
nn.BatchNorm2d layers are replaced by TableTransformerFrozenBatchNorm2d as defined above.
"""
def __init__(self, config):
super().__init__()
self.config = config
# For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API
if config.use_timm_backbone:
# We default to values which were previously hard-coded. This enables configurability from the config
# using backbone arguments, while keeping the default behavior the same.
requires_backends(self, ["timm"])
kwargs = getattr(config, "backbone_kwargs", {})
kwargs = {} if kwargs is None else kwargs.copy()
out_indices = kwargs.pop("out_indices", (1, 2, 3, 4))
num_channels = kwargs.pop("in_chans", config.num_channels)
if config.dilation:
kwargs["output_stride"] = kwargs.get("output_stride", 16)
backbone = create_model(
config.backbone,
pretrained=config.use_pretrained_backbone,
features_only=True,
out_indices=out_indices,
in_chans=num_channels,
**kwargs,
)
else:
backbone = load_backbone(config)
# replace batch norm by frozen batch norm
with torch.no_grad():
replace_batch_norm(backbone)
self.model = backbone
self.intermediate_channel_sizes = (
self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
)
backbone_model_type = None
if config.backbone is not None:
backbone_model_type = config.backbone
elif config.backbone_config is not None:
backbone_model_type = config.backbone_config.model_type
else:
raise ValueError("Either `backbone` or `backbone_config` should be provided in the config")
if "resnet" in backbone_model_type:
for name, parameter in self.model.named_parameters():
if config.use_timm_backbone:
if "layer2" not in name and "layer3" not in name and "layer4" not in name:
parameter.requires_grad_(False)
else:
if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name:
parameter.requires_grad_(False)
def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
# send pixel_values through the model to get list of feature maps
features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
out = []
for feature_map in features:
# downsample pixel_mask to match shape of corresponding feature_map
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
out.append((feature_map, mask))
return out
# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->TableTransformer
class TableTransformerConvModel(nn.Module):
"""
This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
"""
def __init__(self, conv_encoder, position_embedding):
super().__init__()
self.conv_encoder = conv_encoder
self.position_embedding = position_embedding
def forward(self, pixel_values, pixel_mask):
# send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples
out = self.conv_encoder(pixel_values, pixel_mask)
pos = []
for feature_map, mask in out:
# position encoding
pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
return out, pos
# Copied from transformers.models.detr.modeling_detr.DetrSinePositionEmbedding with Detr->TableTransformer
class TableTransformerSinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.embedding_dim = embedding_dim
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, pixel_values, pixel_mask):
if pixel_mask is None:
raise ValueError("No pixel mask provided")
y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale
dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
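        # pos has shape (batch_size, 2 * embedding_dim, height, width); build_position_encoding
        # below instantiates this module with embedding_dim = d_model // 2, so the channel
        # dimension matches the model's hidden size.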
return pos
# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->TableTransformer
class TableTransformerLearnedPositionEmbedding(nn.Module):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, embedding_dim=256):
super().__init__()
self.row_embeddings = nn.Embedding(50, embedding_dim)
self.column_embeddings = nn.Embedding(50, embedding_dim)
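        # Note: both embedding tables have 50 entries, so learned position embeddings only
        # support feature maps up to 50x50.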
def forward(self, pixel_values, pixel_mask=None):
height, width = pixel_values.shape[-2:]
width_values = torch.arange(width, device=pixel_values.device)
height_values = torch.arange(height, device=pixel_values.device)
x_emb = self.column_embeddings(width_values)
y_emb = self.row_embeddings(height_values)
pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
pos = pos.permute(2, 0, 1)
pos = pos.unsqueeze(0)
pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
return pos
# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->TableTransformer
def build_position_encoding(config):
n_steps = config.d_model // 2
if config.position_embedding_type == "sine":
# TODO find a better way of exposing other arguments
position_embedding = TableTransformerSinePositionEmbedding(n_steps, normalize=True)
elif config.position_embedding_type == "learned":
position_embedding = TableTransformerLearnedPositionEmbedding(n_steps)
else:
raise ValueError(f"Not supported {config.position_embedding_type}")
return position_embedding
# Copied from transformers.models.detr.modeling_detr.DetrAttention with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
class TableTransformerAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Here, we add position embeddings to the queries and keys (as explained in the TABLE_TRANSFORMER paper).
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]):
return tensor if object_queries is None else tensor + object_queries
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
object_queries: Optional[torch.Tensor] = None,
key_value_states: Optional[torch.Tensor] = None,
spatial_position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
# add position embeddings to the hidden states before projecting to queries and keys
if object_queries is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, object_queries)
# add key-value position embeddings to the key value states
if spatial_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
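        # Collapse the batch and head dimensions so the attention scores of all heads can be
        # computed with a single batched matrix multiplication. Note that the value projections
        # above are computed from the original (position-free) states: position embeddings only
        # enter the attention through the queries and keys.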
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (batch_size, 1, target_len, source_len):
raise ValueError(
f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is"
f" {attention_mask.size()}"
)
if attention_mask.dtype == torch.bool:
attention_mask = torch.zeros_like(attention_mask, dtype=attn_weights.dtype).masked_fill_(
attention_mask, -torch.inf
)
attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
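# Shape sketch for TableTransformerAttention (illustrative numbers, not taken from the file):
# with embed_dim=256 and num_heads=8, a (batch, seq_len, 256) input yields a
# (batch, seq_len, 256) attn_output and, when output_attentions=True, attention weights of
# shape (batch, 8, seq_len, seq_len).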
class TableTransformerEncoderLayer(nn.Module):
# Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer.__init__ with Detr->TableTransformer
def __init__(self, config: TableTransformerConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = TableTransformerAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
object_queries: Optional[torch.Tensor] = None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*): object queries, to be added to hidden_states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
object_queries=object_queries,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class TableTransformerDecoderLayer(GradientCheckpointingLayer):
# Copied from transformers.models.detr.modeling_detr.DetrDecoderLayer.__init__ with Detr->TableTransformer
def __init__(self, config: TableTransformerConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = TableTransformerAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = TableTransformerAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
object_queries: Optional[torch.Tensor] = None,
query_position_embeddings: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*):
object queries that are added to the queries and keys
in the cross-attention layer.
query_position_embeddings (`torch.FloatTensor`, *optional*):
object queries that are added to the queries and keys
in the self-attention layer.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
object_queries=query_position_embeddings,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
object_queries=query_position_embeddings,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
spatial_position_embeddings=object_queries,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
# Fully Connected
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
class TableTransformerPreTrainedModel(PreTrainedModel):
config: TableTransformerConfig
base_model_prefix = "model"
main_input_name = "pixel_values"
_no_split_modules = [
r"TableTransformerConvEncoder",
r"TableTransformerEncoderLayer",
r"TableTransformerDecoderLayer",
]
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, TableTransformerLearnedPositionEmbedding):
nn.init.uniform_(module.row_embeddings.weight)
nn.init.uniform_(module.column_embeddings.weight)
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
class TableTransformerEncoder(TableTransformerPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TableTransformerEncoderLayer`].
The encoder updates the flattened feature map through multiple self-attention layers.
Small tweak for Table Transformer:
- object_queries are added to the forward pass.
Args:
config: TableTransformerConfig
"""
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.layers = nn.ModuleList([TableTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm = nn.LayerNorm(config.d_model)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
inputs_embeds=None,
attention_mask=None,
object_queries=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
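            # _prepare_4d_attention_mask turns the (batch_size, seq_len) 0/1 mask into an additive
            # mask broadcastable to (batch_size, 1, seq_len, seq_len), with large negative values at
            # padded positions so they vanish after the softmax.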
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
# we add object_queries as extra input to the encoder_layer
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
object_queries=object_queries,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
hidden_states = self.layernorm(hidden_states)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Copied from transformers.models.detr.modeling_detr.DetrDecoder with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
class TableTransformerDecoder(TableTransformerPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TableTransformerDecoderLayer`].
The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
Some small tweaks for TABLE_TRANSFORMER:
- object_queries and query_position_embeddings are added to the forward pass.
- if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.
Args:
config: TableTransformerConfig
"""
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.layers = nn.ModuleList([TableTransformerDecoderLayer(config) for _ in range(config.decoder_layers)])
# in TABLE_TRANSFORMER, the decoder uses layernorm after the last decoder layer output
self.layernorm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
inputs_embeds=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
object_queries=None,
query_position_embeddings=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
The query embeddings that are passed into the decoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`:
- 1 for queries that are **not masked**,
- 0 for queries that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Object queries that are added to the queries and keys in each cross-attention layer.
            query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
                Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if inputs_embeds is not None:
hidden_states = inputs_embeds
input_shape = inputs_embeds.size()[:-1]
combined_attention_mask = None
if attention_mask is not None and combined_attention_mask is not None:
# [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
combined_attention_mask = combined_attention_mask + _prepare_4d_attention_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
encoder_attention_mask = _prepare_4d_attention_mask(
encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
# optional intermediate hidden states
intermediate = () if self.config.auxiliary_loss else None
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(
hidden_states,
combined_attention_mask,
object_queries,
query_position_embeddings,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if self.config.auxiliary_loss:
hidden_states = self.layernorm(hidden_states)
intermediate += (hidden_states,)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# finally, apply layernorm
hidden_states = self.layernorm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
# stack intermediate decoder activations
if self.config.auxiliary_loss:
intermediate = torch.stack(intermediate)
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate]
if v is not None
)
return TableTransformerDecoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
intermediate_hidden_states=intermediate,
)
@auto_docstring(
custom_intro="""
The bare Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) outputting raw
hidden-states without any specific head on top.
"""
)
class TableTransformerModel(TableTransformerPreTrainedModel):
# Copied from transformers.models.detr.modeling_detr.DetrModel.__init__ with Detr->TableTransformer
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
# Create backbone + positional encoding
backbone = TableTransformerConvEncoder(config)
object_queries = build_position_encoding(config)
self.backbone = TableTransformerConvModel(backbone, object_queries)
# Create projection layer
self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1)
self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
self.encoder = TableTransformerEncoder(config)
self.decoder = TableTransformerDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def freeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(False)
def unfreeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(True)
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.FloatTensor] = None,
decoder_attention_mask: Optional[torch.FloatTensor] = None,
encoder_outputs: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], TableTransformerModelOutput]:
r"""
decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
Examples:
```python
>>> from transformers import AutoImageProcessor, TableTransformerModel
>>> from huggingface_hub import hf_hub_download
>>> from PIL import Image
>>> file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png")
>>> image = Image.open(file_path).convert("RGB")
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
>>> model = TableTransformerModel.from_pretrained("microsoft/table-transformer-detection")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> # the last hidden states are the final query embeddings of the Transformer decoder
>>> # these are of shape (batch_size, num_queries, hidden_size)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 15, 256]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, num_channels, height, width = pixel_values.shape
device = pixel_values.device
if pixel_mask is None:
pixel_mask = torch.ones(((batch_size, height, width)), device=device)
        # First, send pixel_values + pixel_mask through the backbone to obtain the features
# pixel_values should be of shape (batch_size, num_channels, height, width)
# pixel_mask should be of shape (batch_size, height, width)
features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)
# get final feature map and downsampled mask
feature_map, mask = features[-1]
if mask is None:
raise ValueError("Backbone does not return downsampled pixel mask")
# Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
projected_feature_map = self.input_projection(feature_map)
# Third, flatten the feature map + object queries of shape NxCxHxW to NxCxHW, and permute it to NxHWxC
# In other words, turn their shape into (batch_size, sequence_length, hidden_size)
flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
object_queries = position_embeddings_list[-1].flatten(2).permute(0, 2, 1)
flattened_mask = mask.flatten(1)
        # Fourth, send flattened_features + flattened_mask + object queries through the encoder
# flattened_features is a Tensor of shape (batch_size, height*width, hidden_size)
# flattened_mask is a Tensor of shape (batch_size, height*width)
if encoder_outputs is None:
encoder_outputs = self.encoder(
inputs_embeds=flattened_features,
attention_mask=flattened_mask,
object_queries=object_queries,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
        # Fifth, send query embeddings + object queries through the decoder (which is conditioned on the encoder output)
query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
queries = torch.zeros_like(query_position_embeddings)
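        # Following DETR, the decoder input ("object queries") starts from zeros; the learned
        # query_position_embeddings are what distinguish the individual queries, being added to
        # the queries and keys in every decoder self-attention layer.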
# decoder outputs consists of (dec_features, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
inputs_embeds=queries,
attention_mask=None,
object_queries=object_queries,
query_position_embeddings=query_position_embeddings,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=flattened_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return TableTransformerModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
)
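# Illustrative sketch (not part of the library): the shape bookkeeping performed in `TableTransformerModel.forward`
# above. The projected backbone feature map of shape (batch_size, d_model, H, W) is flattened and permuted to
# (batch_size, H*W, d_model) before entering the encoder, and the decoder queries start as zeros shaped like the
# learned query position embeddings. The concrete sizes below are assumptions chosen only for demonstration.
def _table_transformer_shape_flow_sketch():
    import torch

    batch_size, d_model, height, width, num_queries = 2, 256, 25, 34, 15
    projected_feature_map = torch.randn(batch_size, d_model, height, width)
    # (batch_size, d_model, H, W) -> (batch_size, H*W, d_model)
    flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
    assert flattened_features.shape == (batch_size, height * width, d_model)
    # stand-in for `self.query_position_embeddings.weight`, broadcast over the batch
    query_position_embeddings = torch.randn(num_queries, d_model).unsqueeze(0).repeat(batch_size, 1, 1)
    queries = torch.zeros_like(query_position_embeddings)
    assert queries.shape == (batch_size, num_queries, d_model)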
@auto_docstring(
custom_intro="""
Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on
top, for tasks such as COCO detection.
"""
)
class TableTransformerForObjectDetection(TableTransformerPreTrainedModel):
# Copied from transformers.models.detr.modeling_detr.DetrForObjectDetection.__init__ with Detr->TableTransformer
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
# DETR encoder-decoder model
self.model = TableTransformerModel(config)
# Object detection heads
self.class_labels_classifier = nn.Linear(
config.d_model, config.num_labels + 1
) # We add one for the "no object" class
self.bbox_predictor = TableTransformerMLPPredictionHead(
input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.FloatTensor] = None,
decoder_attention_mask: Optional[torch.FloatTensor] = None,
encoder_outputs: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[list[dict]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], TableTransformerObjectDetectionOutput]:
r"""
decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
labels (`list[Dict]` of len `(batch_size,)`, *optional*):
Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> from transformers import AutoImageProcessor, TableTransformerForObjectDetection
>>> import torch
>>> from PIL import Image
>>> file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png")
>>> image = Image.open(file_path).convert("RGB")
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
>>> model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
>>> target_sizes = torch.tensor([image.size[::-1]])
>>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[
... 0
... ]
>>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
... box = [round(i, 2) for i in box.tolist()]
... print(
... f"Detected {model.config.id2label[label.item()]} with confidence "
... f"{round(score.item(), 3)} at location {box}"
... )
Detected table with confidence 1.0 at location [202.1, 210.59, 1119.22, 385.09]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # First, send images through the TABLE_TRANSFORMER base model to obtain encoder + decoder outputs
outputs = self.model(
pixel_values,
pixel_mask=pixel_mask,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
# class logits + predicted bounding boxes
logits = self.class_labels_classifier(sequence_output)
pred_boxes = self.bbox_predictor(sequence_output).sigmoid()
loss, loss_dict, auxiliary_outputs = None, None, None
if labels is not None:
outputs_class, outputs_coord = None, None
if self.config.auxiliary_loss:
intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4]
outputs_class = self.class_labels_classifier(intermediate)
outputs_coord = self.bbox_predictor(intermediate).sigmoid()
loss, loss_dict, auxiliary_outputs = self.loss_function(
logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord
)
if not return_dict:
if auxiliary_outputs is not None:
output = (logits, pred_boxes) + auxiliary_outputs + outputs
else:
output = (logits, pred_boxes) + outputs
return ((loss, loss_dict) + output) if loss is not None else output
return TableTransformerObjectDetectionOutput(
loss=loss,
loss_dict=loss_dict,
logits=logits,
pred_boxes=pred_boxes,
auxiliary_outputs=auxiliary_outputs,
last_hidden_state=outputs.last_hidden_state,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->TableTransformer,detr->table_transformer
class TableTransformerMLPPredictionHead(nn.Module):
"""
Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
height and width of a bounding box w.r.t. an image.
Copied from https://github.com/facebookresearch/table_transformer/blob/master/models/table_transformer.py
"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
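# Illustrative sketch (not part of the library): how the prediction head above is used in
# `TableTransformerForObjectDetection`. Each of the `num_queries` decoder embeddings is passed through the
# 3-layer MLP and squashed with a sigmoid so the (center_x, center_y, width, height) coordinates land in [0, 1].
# The tensor sizes are assumptions for demonstration only.
def _bbox_prediction_head_sketch():
    import torch

    head = TableTransformerMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    decoder_output = torch.randn(1, 15, 256)  # (batch_size, num_queries, d_model)
    pred_boxes = head(decoder_output).sigmoid()
    assert pred_boxes.shape == (1, 15, 4)
    assert bool(((pred_boxes >= 0) & (pred_boxes <= 1)).all())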
__all__ = ["TableTransformerForObjectDetection", "TableTransformerModel", "TableTransformerPreTrainedModel"]
| transformers/src/transformers/models/table_transformer/modeling_table_transformer.py/0 | {
"file_path": "transformers/src/transformers/models/table_transformer/modeling_table_transformer.py",
"repo_id": "transformers",
"token_count": 25217
} | 544 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Tokenization classes for UDOP model."""
import os
import re
import warnings
from shutil import copyfile
from typing import Any, Optional, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import (
AddedToken,
BatchEncoding,
EncodedInput,
PreTokenizedInput,
TextInput,
TextInputPair,
TruncationStrategy,
)
from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
UDOP_ENCODE_KWARGS_DOCSTRING = r"""
add_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
stride (`int`, *optional*, defaults to 0):
If set to a number along with `max_length`, the overflowing tokens returned when
`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_token_type_ids (`bool`, *optional*):
Whether to return token type IDs. If left to the default, will return the token type IDs according to
the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are token type IDs?](../glossary#token-type-ids)
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
of returning overflowing tokens.
return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
Whether or not to return special tokens mask information.
return_offsets_mapping (`bool`, *optional*, defaults to `False`):
Whether or not to return `(char_start, char_end)` for each token.
This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
Python's tokenizer, this method will raise `NotImplementedError`.
return_length (`bool`, *optional*, defaults to `False`):
Whether or not to return the lengths of the encoded inputs.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
**kwargs: passed to the `self.tokenize()` method
Return:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
[What are input IDs?](../glossary#input-ids)
- **bbox** -- List of bounding boxes to be fed to a model.
- **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
if *"token_type_ids"* is in `self.model_input_names`).
[What are token type IDs?](../glossary#token-type-ids)
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
[What are attention masks?](../glossary#attention-mask)
- **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
- **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
`return_overflowing_tokens=True`).
- **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
`return_overflowing_tokens=True`).
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
- **length** -- The length of the inputs (when `return_length=True`).
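            Example (illustrative sketch of the keyword arguments above; the checkpoint name is an assumption
            used only for demonstration):
            ```python
            >>> from transformers import UdopTokenizer
            >>> tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")
            >>> words = ["hello", "world"]
            >>> boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            >>> encoding = tokenizer(words, boxes=boxes, padding="max_length", max_length=16, truncation=True, return_tensors="pt")
            ```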
"""
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
@requires(backends=("sentencepiece",))
class UdopTokenizer(PreTrainedTokenizer):
"""
Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
legacy (`bool`, *optional*, defaults to `True`):
Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622
which includes fixes to properly handle tokens that appear after special tokens. A simple example:
- `legacy=True`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True)
>>> tokenizer.encode("Hello <extra_id_0>.")
[8774, 32099, 3, 5, 1]
```
- `legacy=False`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False)
>>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here
[8774, 32099, 5, 1]
```
Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for
more details.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
eos_token="</s>",
unk_token="<unk>",
sep_token="</s>",
pad_token="<pad>",
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_label=-100,
only_label_first_subword=True,
additional_special_tokens=None,
sp_model_kwargs: Optional[dict[str, Any]] = None,
legacy=True,
add_prefix_space=True,
**kwargs,
) -> None:
eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
self.legacy = legacy
self.add_prefix_space = add_prefix_space
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
# additional properties
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
super().__init__(
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
sep_token_box=sep_token_box,
pad_token_box=pad_token_box,
pad_token_label=pad_token_label,
only_label_first_subword=only_label_first_subword,
additional_special_tokens=additional_special_tokens,
sp_model_kwargs=self.sp_model_kwargs,
legacy=legacy,
add_prefix_space=add_prefix_space,
**kwargs,
)
@property
def vocab_size(self):
return len(self.sp_model)
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_vocab
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_special_tokens_mask
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
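        Example (illustrative; the token ids are arbitrary placeholders): for `token_ids_0 = [10, 11]` and no
        `token_ids_1`, this returns `[0, 0, 1]`, the trailing `1` marking the `</s>` token that
        `build_inputs_with_special_tokens` appends.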
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# normal case: some special tokens
if token_ids_1 is None:
return ([0] * len(token_ids_0)) + [1]
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_sentinel_tokens
def get_sentinel_tokens(self):
return list(
            set(filter(lambda x: re.search(r"<extra_id_\d+>", x) is not None, self.additional_special_tokens))
)
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_sentinel_token_ids
def get_sentinel_token_ids(self):
return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._add_eos_if_not_present
def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]:
"""Do not add eos again if user already added it."""
if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added."
)
return token_ids
else:
return token_ids + [self.eos_token_id]
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.create_token_type_ids_from_sequences
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
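        Example (illustrative; the token ids are arbitrary and `eos_token_id == 1` is assumed, as in the default
        T5-style sentencepiece vocabulary): `build_inputs_with_special_tokens([10, 11])` returns `[10, 11, 1]`,
        and `build_inputs_with_special_tokens([10, 11], [12])` returns `[10, 11, 1, 12, 1]`.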
"""
token_ids_0 = self._add_eos_if_not_present(token_ids_0)
if token_ids_1 is None:
return token_ids_0
else:
token_ids_1 = self._add_eos_if_not_present(token_ids_1)
return token_ids_0 + token_ids_1
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.__getstate__
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__.update(d)
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
def tokenize(self, text: "TextInput", **kwargs) -> list[str]:
"""
Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
first token is special.
"""
if self.legacy or len(text) == 0:
return super().tokenize(text, **kwargs)
text = text.replace(SPIECE_UNDERLINE, " ")
if self.add_prefix_space:
text = SPIECE_UNDERLINE + text
tokens = super().tokenize(text, **kwargs)
if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
tokens = tokens[1:]
return tokens
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
def _tokenize(self, text, **kwargs):
"""
Returns a tokenized string.
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
`['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
`unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
"""
if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
return self.sp_model.encode(text, out_type=str)
# 1. Encode string + prefix ex: "<unk> Hey"
tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
# 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index)
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.convert_tokens_to_string
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
# since we manually add the prefix space, we have to remove it when decoding
if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
tokens[0] = tokens[0][1:]
current_sub_tokens = []
out_string = ""
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token)
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.save_vocabulary
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
@add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None,
boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None,
word_labels: Optional[Union[list[int], list[list[int]]]] = None,
text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
text_pair_target: Optional[
Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]
] = None,
**kwargs,
) -> BatchEncoding:
if text is None and text_target is None:
raise ValueError("You need to specify either `text` or `text_target`.")
if text is not None:
# The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the
# input mode in this case.
if not self._in_target_context_manager:
self._switch_to_input_mode()
encodings = self.call_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, **kwargs)
if text_target is not None:
self._switch_to_target_mode()
target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **kwargs)
# Leave back tokenizer in input mode
self._switch_to_input_mode()
if text_target is None:
return encodings
elif text is None:
return target_encodings
else:
encodings["labels"] = target_encodings["input_ids"]
return encodings
def call_boxes(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]],
text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None,
boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None,
word_labels: Optional[Union[list[int], list[list[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`list[list[int]]`, `list[list[list[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`list[int]`, `list[list[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
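        Example (illustrative sketch; the checkpoint name is an assumption used only for demonstration):
        ```python
        >>> from transformers import UdopTokenizer
        >>> tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")
        >>> words = ["hello", "world"]
        >>> boxes = [[1, 8, 12, 20], [3, 74, 45, 81]]
        >>> word_labels = [0, 1]
        >>> encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
        ```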
"""
# Input type checking for clearer error
def _is_valid_text_input(t):
if isinstance(t, str):
# Strings are fine
return True
elif isinstance(t, (list, tuple)):
# List are fine as long as they are...
if len(t) == 0:
# ... empty
return True
elif isinstance(t[0], str):
# ... list of strings
return True
elif isinstance(t[0], (list, tuple)):
# ... list with an empty list or with a list of strings
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
# in case text + text_pair are provided, text = questions, text_pair = words
if not _is_valid_text_input(text):
raise ValueError("text input must of type `str` (single example) or `list[str]` (batch of examples). ")
if not isinstance(text_pair, (list, tuple)):
raise ValueError(
"words must of type `list[str]` (single pretokenized example), "
"or `list[list[str]]` (batch of pretokenized examples)."
)
else:
# in case only text is provided => must be words
if not isinstance(text, (list, tuple)):
raise ValueError(
"Words must of type `list[str]` (single pretokenized example), "
"or `list[list[str]]` (batch of pretokenized examples)."
)
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError("You must provide corresponding bounding boxes")
if is_batched:
if len(words) != len(boxes):
raise ValueError("You must provide words and boxes for an equal amount of examples")
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError("You must provide as many words as there are bounding boxes")
else:
if len(words) != len(boxes):
raise ValueError("You must provide as many words as there are bounding boxes")
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
f" {len(text_pair)}."
)
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus_boxes(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus_boxes(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def batch_encode_plus_boxes(
self,
batch_text_or_text_pairs: Union[
list[TextInput],
list[TextInputPair],
list[PreTokenizedInput],
],
is_pair: Optional[bool] = None,
boxes: Optional[list[list[list[int]]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
Args:
batch_text_or_text_pairs (`list[str]`, `list[tuple[str, str]]`, `list[list[str]]`, `list[tuple[list[str], list[str]]]`, and for not-fast tokenizers, also `list[list[int]]`, `list[tuple[list[int], list[int]]]`):
Batch of sequences or pair of sequences to be encoded. This can be a list of
string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
details in `encode_plus`).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus_boxes(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def encode_boxes(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> list[int]:
"""
        Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing
        `self.convert_tokens_to_ids(self.tokenize(text))`.
        Args:
text (`str`, `list[str]` or `list[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
"""
encoded_inputs = self.encode_plus_boxes(
text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
def encode_plus_boxes(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
text (`str`, `list[str]` or (for non-fast tokenizers) `list[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus_boxes(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus_boxes(
self,
batch_text_or_text_pairs: Union[
list[TextInput],
list[TextInputPair],
list[PreTokenizedInput],
],
is_pair: Optional[bool] = None,
boxes: Optional[list[list[list[int]]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
)
batch_outputs = self._batch_prepare_for_model_boxes(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
@add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING)
def _batch_prepare_for_model_boxes(
self,
batch_text_or_text_pairs,
is_pair: Optional[bool] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
"""
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.
        Args:
            batch_text_or_text_pairs: list of tokenized input ids or input ids pairs
        """
batch_outputs = {}
for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
batch_text_or_text_pair, boxes_example = example
outputs = self.prepare_for_model_boxes(
batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
batch_text_or_text_pair[1] if is_pair else None,
boxes_example,
word_labels=word_labels[idx] if word_labels is not None else None,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
padding_side=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
def _encode_plus_boxes(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[int]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast. "
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
return self.prepare_for_model_boxes(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
@add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING)
def prepare_for_model_boxes(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
prepend_batch_axis: bool = False,
**kwargs,
) -> BatchEncoding:
"""
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens.
Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
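        Example (illustrative; the subword split and label id are placeholders): if a word is split into two
        subword tokens and carries the word label `5`, the returned token-level `labels` for it are `[5, -100]`
        (with `only_label_first_subword=True`) and its bounding box is repeated once per subword in `bbox`.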
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
tokens = []
pair_tokens = []
token_boxes = []
pair_token_boxes = []
labels = []
if text_pair is None:
if word_labels is None:
# CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
for word, box in zip(text, boxes):
if len(word) < 1: # skip empty words
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
else:
# CASE 2: token classification (training)
for word, box, label in zip(text, boxes, word_labels):
if len(word) < 1: # skip empty words
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
if self.only_label_first_subword:
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
else:
labels.extend([label] * len(word_tokens))
else:
# CASE 3: document visual question answering (inference)
# text = question
# text_pair = words
tokens = self.tokenize(text)
token_boxes = [self.pad_token_box for _ in range(len(tokens))]
for word, box in zip(text_pair, boxes):
if len(word) < 1: # skip empty words
continue
word_tokens = self.tokenize(word)
pair_tokens.extend(word_tokens)
pair_token_boxes.extend([box] * len(word_tokens))
# Create ids + pair_ids
ids = self.convert_tokens_to_ids(tokens)
pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
# Compute the total size of the returned encodings
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
# Truncation: Handle max sequence length
overflowing_tokens = []
overflowing_token_boxes = []
overflowing_labels = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
(
ids,
token_boxes,
pair_ids,
pair_token_boxes,
labels,
overflowing_tokens,
overflowing_token_boxes,
overflowing_labels,
) = self.truncate_sequences(
ids,
token_boxes,
pair_ids=pair_ids,
pair_token_boxes=pair_token_boxes,
labels=labels,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_token_type_ids and not add_special_tokens:
raise ValueError(
"Asking to return token_type_ids while setting add_special_tokens to False "
"results in an undefined behavior. Please set add_special_tokens to True or "
"set return_token_type_ids to None."
)
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
encoded_inputs["overflowing_labels"] = overflowing_labels
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
token_boxes = token_boxes + [self.sep_token_box]
if pair_token_boxes:
pair_token_boxes = pair_token_boxes + [self.sep_token_box]
if labels:
labels = labels + [self.pad_token_label]
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
# Build output dictionary
encoded_inputs["input_ids"] = sequence
encoded_inputs["bbox"] = token_boxes + pair_token_boxes
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
if labels:
encoded_inputs["labels"] = labels
# Check lengths
self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
# Padding
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
batch_outputs = BatchEncoding(
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
)
return batch_outputs
# Copied from transformers.models.layoutxlm.tokenization_layoutxlm.LayoutXLMTokenizer.truncate_sequences
def truncate_sequences(
self,
ids: list[int],
token_boxes: list[list[int]],
pair_ids: Optional[list[int]] = None,
pair_token_boxes: Optional[list[list[int]]] = None,
labels: Optional[list[int]] = None,
num_tokens_to_remove: int = 0,
truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
stride: int = 0,
) -> tuple[list[int], list[int], list[int]]:
"""
Truncates a sequence pair in-place following the strategy.
Args:
ids (`list[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
token_boxes (`list[list[int]]`):
Bounding boxes of the first sequence.
pair_ids (`list[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_token_boxes (`list[list[int]]`, *optional*):
Bounding boxes of the second sequence.
labels (`list[int]`, *optional*):
Labels of the first sequence (for token classification tasks).
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`tuple`: The truncated `ids`, `token_boxes`, `pair_ids`, `pair_token_boxes` and `labels`, followed by the
lists of overflowing tokens, overflowing token boxes and overflowing labels.
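Example (illustrative only; `tokenizer` is assumed to be an already instantiated `UdopTokenizer`, and the
ids, boxes and labels below are made up):
```python
>>> ids = [10, 11, 12, 13]
>>> boxes = [[1, 1, 2, 2]] * 4
>>> labels = [0, 0, 1, 1]
>>> out = tokenizer.truncate_sequences(
...     ids, boxes, labels=labels, num_tokens_to_remove=2, truncation_strategy="only_first"
... )
>>> out[0], out[4], out[5]
([10, 11], [0, 0], [12, 13])
```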
"""
if num_tokens_to_remove <= 0:
return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
overflowing_token_boxes = []
overflowing_labels = []
if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
if not overflowing_tokens:
window_len = min(len(ids), stride + 1)
else:
window_len = 1
overflowing_tokens.extend(ids[-window_len:])
overflowing_token_boxes.extend(token_boxes[-window_len:])
overflowing_labels.extend(labels[-window_len:])
ids = ids[:-1]
token_boxes = token_boxes[:-1]
labels = labels[:-1]
else:
if not overflowing_tokens:
window_len = min(len(pair_ids), stride + 1)
else:
window_len = 1
overflowing_tokens.extend(pair_ids[-window_len:])
overflowing_token_boxes.extend(pair_token_boxes[-window_len:])
pair_ids = pair_ids[:-1]
pair_token_boxes = pair_token_boxes[:-1]
elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
overflowing_token_boxes = token_boxes[-window_len:]
overflowing_labels = labels[-window_len:]
ids = ids[:-num_tokens_to_remove]
token_boxes = token_boxes[:-num_tokens_to_remove]
labels = labels[:-num_tokens_to_remove]
else:
logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input "
f"but the first sequence has a length {len(ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, "
"for instance 'longest_first' or 'only_second'."
)
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
overflowing_token_boxes = pair_token_boxes[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
else:
logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input "
f"but the second sequence has a length {len(pair_ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, "
"for instance 'longest_first' or 'only_first'."
)
return (
ids,
token_boxes,
pair_ids,
pair_token_boxes,
labels,
overflowing_tokens,
overflowing_token_boxes,
overflowing_labels,
)
# Copied from transformers.models.layoutxlm.tokenization_layoutxlm.LayoutXLMTokenizer._pad
def _pad(
self,
encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length
- PaddingStrategy.DO_NOT_PAD: Do not pad (default)
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
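# e.g. max_length=60 with pad_to_multiple_of=8 is rounded up to ((60 // 8) + 1) * 8 = 64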
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
if "labels" in encoded_inputs:
encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
if "labels" in encoded_inputs:
encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" + str(padding_side))
return encoded_inputs
__all__ = ["UdopTokenizer"]
| transformers/src/transformers/models/udop/tokenization_udop.py/0 | {
"file_path": "transformers/src/transformers/models/udop/tokenization_udop.py",
"repo_id": "transformers",
"token_count": 32569
} | 545 |
# coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch UniSpeechSat model."""
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.nn as nn
from ...modeling_outputs import ModelOutput, Wav2Vec2BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ..wav2vec2.modeling_wav2vec2 import (
Wav2Vec2Encoder,
Wav2Vec2EncoderStableLayerNorm,
Wav2Vec2FeatureEncoder,
Wav2Vec2FeatureProjection,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForCTC,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
Wav2Vec2GumbelVectorQuantizer,
Wav2Vec2Model,
Wav2Vec2PositionalConvEmbedding,
)
from .configuration_unispeech_sat import UniSpeechSatConfig
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`UniSpeechSatForPreTraining`], with potential hidden states and attentions.
"""
)
class UniSpeechSatForPreTrainingOutput(ModelOutput):
r"""
loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the
[official paper](https://arxiv.org/pdf/2006.11477.pdf).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`, *optional*):
Prediction scores of the contrastive loss model, i.e. the output of the model before the final softmax.
projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
projected quantized states.
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
target vectors for contrastive loss.
codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
The perplexity of the codevector distribution, used to measure the diversity of the codebook.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
projected_states: Optional[torch.FloatTensor] = None
projected_quantized_states: Optional[torch.FloatTensor] = None
codevector_perplexity: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
class UniSpeechSatPositionalConvEmbedding(Wav2Vec2PositionalConvEmbedding):
pass
class UniSpeechSatFeatureEncoder(Wav2Vec2FeatureEncoder):
pass
class UniSpeechSatFeatureProjection(Wav2Vec2FeatureProjection):
pass
class UniSpeechSatEncoder(Wav2Vec2Encoder):
pass
class UniSpeechSatEncoderStableLayerNorm(Wav2Vec2EncoderStableLayerNorm):
pass
class UniSpeechSatGumbelVectorQuantizer(Wav2Vec2GumbelVectorQuantizer):
def __init__(self, config):
super().__init__()
self.weight_proj = nn.Linear(config.hidden_size, self.num_groups * self.num_vars)
@staticmethod
def _compute_perplexity(probs, mask=None):
marginal_probs = probs.mean(dim=0)
perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
return perplexity
def forward(self, hidden_states):
batch_size, sequence_length, hidden_size = hidden_states.shape
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
if self.training:
# sample code vector probs via gumbel in a differentiable way
codevector_probs = nn.functional.gumbel_softmax(
hidden_states.float(), tau=self.temperature, hard=True
).type_as(hidden_states)
# compute perplexity
codevector_soft_dist = torch.softmax(
hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
)
perplexity = self._compute_perplexity(codevector_soft_dist)
else:
# take argmax in non-differentiable way
# compute hard codevector distribution (one hot)
codevector_idx = hidden_states.argmax(dim=-1)
codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
-1, codevector_idx.view(-1, 1), 1.0
)
codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
perplexity = self._compute_perplexity(codevector_probs)
codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
# use probs to retrieve codevectors
codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
return codevectors, perplexity
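# Shape walk-through (descriptive only): hidden_states of shape (batch, seq_len, hidden_size) is projected to
# num_groups * num_vars logits and reshaped to (batch * seq_len * num_groups, num_vars); gumbel-softmax
# (training) or argmax (inference) then selects one code per group, and the chosen codevectors are summed over
# the num_vars axis and reshaped back to (batch, seq_len, codevector_dim).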
@auto_docstring
class UniSpeechSatPreTrainedModel(PreTrainedModel):
config: UniSpeechSatConfig
base_model_prefix = "unispeech_sat"
main_input_name = "input_values"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
def _init_weights(self, module):
"""Initialize the weights"""
# gumbel softmax requires special init
if isinstance(module, UniSpeechSatGumbelVectorQuantizer):
module.weight_proj.weight.data.normal_(mean=0.0, std=1)
module.weight_proj.bias.data.zero_()
nn.init.uniform_(module.codevectors)
elif isinstance(module, UniSpeechSatPositionalConvEmbedding):
nn.init.normal_(
module.conv.weight,
mean=0,
std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
)
nn.init.constant_(module.conv.bias, 0)
elif isinstance(module, UniSpeechSatFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
nn.init.uniform_(module.projection.weight, a=-k, b=k)
nn.init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
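# Worked example (assuming the default wav2vec2-style feature encoder with conv_kernel (10, 3, 3, 3, 3, 2, 2)
# and conv_stride (5, 2, 2, 2, 2, 2, 2)): one second of 16 kHz audio, i.e. 16000 samples, reduces to
# 16000 -> 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49 output frames.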
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
# Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
# these two operations make sure that all values before the output length indices are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
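# e.g. with an output length of 3 and feature_vector_length 5, the mask starts as [0, 0, 1, 0, 0]
# (a single 1 at index output_length - 1) and flip -> cumsum -> flip turns it into [1, 1, 1, 0, 0].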
UniSpeechSatBaseModelOutput = Wav2Vec2BaseModelOutput
class UniSpeechSatModel(UniSpeechSatPreTrainedModel, Wav2Vec2Model):
def __init__(self, config: UniSpeechSatConfig):
UniSpeechSatPreTrainedModel.__init__(self, config)
self.config = config
self.feature_extractor = UniSpeechSatFeatureEncoder(config)
self.feature_projection = UniSpeechSatFeatureProjection(config)
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
if config.do_stable_layer_norm:
self.encoder = UniSpeechSatEncoderStableLayerNorm(config)
else:
self.encoder = UniSpeechSatEncoder(config)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_extractor(self):
raise AttributeError("Not needed for UniSpeechSat")
def freeze_feature_encoder(self):
raise AttributeError("Not needed for UniSpeechSat")
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, UniSpeechSatBaseModelOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return UniSpeechSatBaseModelOutput(
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
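# Minimal usage sketch (not part of the original file). The checkpoint name is illustrative and the input is
# random noise; real inputs would come from a feature extractor applied to 16 kHz audio.
#
#     import torch
#     from transformers import UniSpeechSatModel
#
#     model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base").eval()
#     input_values = torch.randn(1, 16000)  # one second of fake 16 kHz audio
#     with torch.no_grad():
#         outputs = model(input_values)
#     # outputs.last_hidden_state has shape (1, 49, hidden_size) with the default conv stack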
@auto_docstring(
custom_intro="""
UniSpeechSat Model with a vector-quantization module and ctc loss for pre-training.
"""
)
class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel):
def __init__(self, config: UniSpeechSatConfig):
super().__init__(config)
self.unispeech_sat = UniSpeechSatModel(config)
self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
self.quantizer = UniSpeechSatGumbelVectorQuantizer(config)
self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)
self.dropout = nn.Dropout(config.final_dropout)
self.speaker_proj = nn.Linear(config.hidden_size, config.codevector_dim)
self.label_embeddings_concat = nn.Parameter(torch.FloatTensor(config.num_clusters, config.codevector_dim))
self.label_embeddings_concat.data.zero_()
self.layer_norm_for_extract = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if self.config.do_stable_layer_norm:
self.layer_norm_for_extract.requires_grad = False
# Initialize weights and apply final processing
self.post_init()
def set_gumbel_temperature(self, temperature: int):
"""
Set the Gumbel softmax temperature to a given value. Only necessary for training
"""
self.quantizer.temperature = temperature
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech_sat.feature_extractor._freeze_parameters()
@staticmethod
def compute_contrastive_logits(
target_features: torch.FloatTensor,
negative_features: torch.FloatTensor,
predicted_features: torch.FloatTensor,
temperature: int = 1,
):
"""
Compute logits for contrastive loss using cosine similarity as the distance measure between
`[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
"""
target_features = torch.cat([target_features, negative_features], dim=0)
logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
logits = logits.type_as(target_features)
# apply temperature
logits = logits / temperature
return logits
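# Shape note (descriptive only): with target_features of shape (1, batch, seq_len, dim) and negative_features
# of shape (num_negatives, batch, seq_len, dim), the returned logits have shape (1 + num_negatives, batch,
# seq_len), i.e. one row for the positive target plus one per negative.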
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, UniSpeechSatForPreTrainingOutput]:
r"""
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining
>>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base")
>>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base")
>>> # TODO: Add full pretraining example
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.unispeech_sat(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
transformer_features = outputs[0]
# quantize all (unmasked) extracted features and project to final vq dim
extract_features = self.dropout_features(outputs[1])
# TODO(PVP) - add pretraining logic and add to tests
logits = extract_features
loss = quantized_features = codevector_perplexity = None
if not return_dict:
if loss is not None:
return (loss, logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return (logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return UniSpeechSatForPreTrainingOutput(
loss=loss,
logits=logits,
projected_states=transformer_features,
projected_quantized_states=quantized_features,
codevector_perplexity=codevector_perplexity,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class UniSpeechSatForCTC(Wav2Vec2ForCTC):
pass
class UniSpeechSatForSequenceClassification(Wav2Vec2ForSequenceClassification):
pass
class UniSpeechSatForAudioFrameClassification(Wav2Vec2ForAudioFrameClassification):
pass
class UniSpeechSatForXVector(Wav2Vec2ForXVector):
pass
__all__ = [
"UniSpeechSatForAudioFrameClassification",
"UniSpeechSatForCTC",
"UniSpeechSatForPreTraining",
"UniSpeechSatForSequenceClassification",
"UniSpeechSatForXVector",
"UniSpeechSatModel",
"UniSpeechSatPreTrainedModel",
]
| transformers/src/transformers/models/unispeech_sat/modular_unispeech_sat.py/0 | {
"file_path": "transformers/src/transformers/models/unispeech_sat/modular_unispeech_sat.py",
"repo_id": "transformers",
"token_count": 7473
} | 546 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for VideoLlava.
"""
from typing import Optional, Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, get_image_size, to_numpy_array
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class VideoLlavaProcessor(ProcessorMixin):
r"""
Constructs a VideoLlava processor which wraps a VideoLlava image processor and a Llava tokenizer into a single processor.
[`VideoLlavaProcessor`] offers all the functionalities of [`VideoLlavaImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~VideoLlavaProcessor.__call__`] and [`~VideoLlavaProcessor.decode`] for more information.
Args:
image_processor ([`VideoLlavaImageProcessor`], *optional*):
The image processor is a required input.
video_processor ([`VideoLlavaVideoProcessor`], *optional*):
The video processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*, defaults to 14):
Patch size from the vision tower.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Should be same as in model's config
image_token (`str`, *optional*, defaults to `"<image>"`):
Special token used to denote image location.
video_token (`str`, *optional*, defaults to `"<video>"`):
Special token used to denote video location.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
num_additional_image_tokens (`int`, *optional*, defaults to 1):
Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other
extra tokens appended, no need to set this arg.
"""
attributes = ["image_processor", "video_processor", "tokenizer"]
image_processor_class = "VideoLlavaImageProcessor"
video_processor_class = "AutoVideoProcessor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
image_processor=None,
video_processor=None,
tokenizer=None,
patch_size=14,
vision_feature_select_strategy="default",
image_token="<image>", # set the default and let users change if they have peculiar special tokens in rare cases
video_token="<video>",
chat_template=None,
num_additional_image_tokens=1,
**kwargs,
):
self.patch_size = patch_size
self.num_additional_image_tokens = num_additional_image_tokens
self.vision_feature_select_strategy = vision_feature_select_strategy
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
self.video_token = tokenizer.video_token if hasattr(tokenizer, "video_token") else video_token
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.video_token_id = tokenizer.convert_tokens_to_ids(self.video_token)
super().__init__(image_processor, video_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
images: ImageInput = None,
videos: ImageInput = None,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length=None,
return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
VideoLlavaImageProcessor's [`~VideoLlavaImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
Video frames to preprocess. Expects a single or batch of video frames in NumPy array or PyTorch
tensor. Each video should be of shape (T, C, H, W), where T is number of frames, C is
number of channels, H and W are image height and width.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`, *optional*):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values to be fed to a model. Returned when `videos` is not `None`.
"""
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
data = {}
if images is not None:
encoded_images = self.image_processor(images=images, return_tensors=return_tensors)
data.update(encoded_images)
height, width = get_image_size(to_numpy_array(encoded_images.get("pixel_values_images")[0]))
num_image_tokens = (height // self.patch_size) * (width // self.patch_size)
num_image_tokens += self.num_additional_image_tokens
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
text = [sample.replace(self.image_token, self.image_token * num_image_tokens) for sample in text]
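# e.g. a 224x224 image with patch_size 14 yields (224 // 14) * (224 // 14) = 256 patch tokens; +1 for the
# CLS token and -1 for the "default" feature-select strategy leaves 256 expanded <image> tokens.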
if videos is not None:
encoded_videos = self.video_processor(videos=videos, return_tensors=return_tensors)
data.update(encoded_videos)
one_video = encoded_videos.get("pixel_values_videos")[0]
if isinstance(encoded_videos.get("pixel_values_videos")[0], (list, tuple)):
one_video = np.array(one_video)
else:
one_video = to_numpy_array(one_video)
height, width = get_image_size(one_video[0])
num_frames = one_video.shape[0] # frame dim is always after batch dim
num_image_tokens = (height // self.patch_size) * (width // self.patch_size)
num_image_tokens += self.num_additional_image_tokens
num_video_tokens = num_image_tokens * num_frames
text = [sample.replace(self.video_token, self.video_token * num_video_tokens) for sample in text]
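# e.g. 8 frames of 224x224 with patch_size 14: (224 // 14) * (224 // 14) + 1 = 257 tokens per frame, times
# 8 frames = 2056 expanded <video> tokens; note the "default" strategy adjustment above applies only to images.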
text_inputs = self.tokenizer(
text,
return_tensors=None,
padding=padding,
truncation=truncation,
max_length=max_length,
)
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
data.update(text_inputs)
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["VideoLlavaProcessor"]
| transformers/src/transformers/models/video_llava/processing_video_llava.py/0 | {
"file_path": "transformers/src/transformers/models/video_llava/processing_video_llava.py",
"repo_id": "transformers",
"token_count": 4264
} | 547 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ViT MAE checkpoints from the original repository: https://github.com/facebookresearch/mae"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
if "cls_token" in name:
name = name.replace("cls_token", "vit.embeddings.cls_token")
if "mask_token" in name:
name = name.replace("mask_token", "decoder.mask_token")
if "decoder_pos_embed" in name:
name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
if "pos_embed" in name and "decoder" not in name:
name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
if "patch_embed.proj" in name:
name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
if "patch_embed.norm" in name:
name = name.replace("patch_embed.norm", "vit.embeddings.norm")
if "decoder_blocks" in name:
name = name.replace("decoder_blocks", "decoder.decoder_layers")
if "blocks" in name:
name = name.replace("blocks", "vit.encoder.layer")
if "attn.proj" in name:
name = name.replace("attn.proj", "attention.output.dense")
if "attn" in name:
name = name.replace("attn", "attention.self")
if "norm1" in name:
name = name.replace("norm1", "layernorm_before")
if "norm2" in name:
name = name.replace("norm2", "layernorm_after")
if "mlp.fc1" in name:
name = name.replace("mlp.fc1", "intermediate.dense")
if "mlp.fc2" in name:
name = name.replace("mlp.fc2", "output.dense")
if "decoder_embed" in name:
name = name.replace("decoder_embed", "decoder.decoder_embed")
if "decoder_norm" in name:
name = name.replace("decoder_norm", "decoder.decoder_norm")
if "decoder_pred" in name:
name = name.replace("decoder_pred", "decoder.decoder_pred")
if "norm.weight" in name and "decoder" not in name:
name = name.replace("norm.weight", "vit.layernorm.weight")
if "norm.bias" in name and "decoder" not in name:
name = name.replace("norm.bias", "vit.layernorm.bias")
return name
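# e.g. "blocks.0.attn.proj.weight" becomes "vit.encoder.layer.0.attention.output.dense.weight".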
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy():
val = orig_state_dict.pop(key)
if "qkv" in key:
key_split = key.split(".")
layer_num = int(key_split[1])
if "decoder_blocks" in key:
dim = config.decoder_hidden_size
prefix = "decoder.decoder_layers."
if "weight" in key:
orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
elif "bias" in key:
orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
else:
dim = config.hidden_size
prefix = "vit.encoder.layer."
if "weight" in key:
orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
elif "bias" in key:
orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
else:
orig_state_dict[rename_key(key)] = val
return orig_state_dict
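# e.g. the fused "blocks.0.attn.qkv.weight" of shape (3 * hidden_size, hidden_size) is split into equal
# query / key / value slices stored under "vit.encoder.layer.0.attention.attention.{query,key,value}.weight".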
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
config = ViTMAEConfig()
if "large" in checkpoint_url:
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
elif "huge" in checkpoint_url:
config.patch_size = 14
config.hidden_size = 1280
config.intermediate_size = 5120
config.num_hidden_layers = 32
config.num_attention_heads = 16
model = ViTMAEForPreTraining(config)
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
image_processor = ViTMAEImageProcessor(size=config.image_size)
new_state_dict = convert_state_dict(state_dict, config)
model.load_state_dict(new_state_dict)
model.eval()
url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = image_processor(images=image, return_tensors="pt")
# forward pass
torch.manual_seed(2)
outputs = model(**inputs)
logits = outputs.logits
if "large" in checkpoint_url:
expected_slice = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
)
elif "huge" in checkpoint_url:
expected_slice = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
)
else:
expected_slice = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
)
# verify logits
assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
print(f"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
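# Example invocation (the URL is the script's own default; the output path is illustrative):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base-converted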
| transformers/src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py",
"repo_id": "transformers",
"token_count": 3302
} | 548 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Wav2Vec2 checkpoint."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
Wav2Vec2Config,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForCTC,
Wav2Vec2ForPreTraining,
Wav2Vec2Processor,
logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
result = {}
with open(filename, "r") as file:
for line_number, line in enumerate(file):
line = line.strip()
if line:
words = line.split()
key = line_number
value = words[0]
result[key] = value
return result
def set_recursively(key, value, full_name, weight_type, hf_pointer):
for attribute in key.split("."):
hf_pointer = getattr(hf_pointer, attribute)
hf_param_name = None
for param_key in PARAM_MAPPING:
if full_name.endswith(param_key):
hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
weight_type = "param"
# fairseq uses nn.utils.weight_norm() while transformers switches to nn.utils.parametrizations.weight_norm()
# the mapping between two versions:
# https://github.com/pytorch/pytorch/blob/56935684c3dfad7841c83c719eeebecb560fe466/torch/nn/utils/parametrizations.py#L389-L395
if weight_type is not None and weight_type != "param":
if weight_type == "weight_g" and not hasattr(hf_pointer, "weight_g"):
hf_shape = hf_pointer.parametrizations.weight.original0.shape
elif weight_type == "weight_v" and not hasattr(hf_pointer, "weight_v"):
hf_shape = hf_pointer.parametrizations.weight.original1.shape
else:
hf_shape = getattr(hf_pointer, weight_type).shape
elif weight_type is not None and weight_type == "param":
shape_pointer = hf_pointer
for attribute in hf_param_name.split("."):
shape_pointer = getattr(shape_pointer, attribute)
hf_shape = shape_pointer.shape
# let's reduce dimension
value = value[0]
else:
hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
if hasattr(hf_pointer, "weight_g"):
hf_pointer.weight_g.data = value
else:
hf_pointer.parametrizations.weight.original0.data = value
elif weight_type == "weight_v":
if hasattr(hf_pointer, "weight_v"):
hf_pointer.weight_v.data = value
else:
hf_pointer.parametrizations.weight.original1.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
elif weight_type == "param":
for attribute in hf_param_name.split("."):
hf_pointer = getattr(hf_pointer, attribute)
hf_pointer.data = value
else:
hf_pointer.data = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
hf_param_name = None
for param_key in PARAM_MAPPING:
if full_name.endswith(param_key):
hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
weight_type = "param"
if weight_type is not None and weight_type != "param":
full_key = ".".join([key, weight_type])
elif weight_type is not None and weight_type == "param":
full_key = ".".join([key, hf_param_name])
else:
full_key = key
hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
is_used = False
for key, mapped_key in MAPPING.items():
mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key)[0].split(".")[-2]
mapped_key = mapped_key.replace("*", layer_index)
if "weight_g" in name:
weight_type = "weight_g"
elif "weight_v" in name:
weight_type = "weight_v"
elif "bias" in name:
weight_type = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
weight_type = "weight"
else:
weight_type = None
if hf_dict is not None:
rename_dict(mapped_key, value, name, weight_type, hf_dict)
else:
set_recursively(mapped_key, value, name, weight_type, hf_model)
return is_used
return is_used
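# e.g. the fairseq name "encoder.layers.0.self_attn.k_proj.weight" matches the "self_attn.k_proj" entry of
# MAPPING, the layer index "0" is spliced in for "*", and the value is loaded into
# "wav2vec2.encoder.layers.0.attention.k_proj.weight" with weight_type "weight".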
def recursively_load_weights(fairseq_model, hf_model, is_headless):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.wav2vec2.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name,
value,
feature_extractor,
unused_weights,
hf_model.config.feat_extract_norm == "group",
)
is_used = True
else:
is_used = load_wav2vec2_layer(name, value, hf_model)
if not is_used:
unused_weights.append(name)
logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
name = full_name.split("conv_layers.")[-1]
items = name.split(".")
layer_id = int(items[0])
type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
"""
Copy/paste/tweak model's weights to transformers design.
"""
if config_path is not None:
config = Wav2Vec2Config.from_pretrained(config_path)
else:
config = Wav2Vec2Config()
if is_seq_class:
id2label = read_txt_into_dict(dict_path)
config.id2label = id2label
hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
feature_extractor = Wav2Vec2FeatureExtractor(
feature_size=1,
sampling_rate=16000,
padding_value=0,
do_normalize=True,
return_attention_mask=True,
)
feature_extractor.save_pretrained(pytorch_dump_folder_path)
elif is_finetuned:
if dict_path:
target_dict = Dictionary.load(dict_path)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
config.bos_token_id = target_dict.pad_index
config.pad_token_id = target_dict.bos_index
config.eos_token_id = target_dict.eos_index
config.vocab_size = len(target_dict.symbols)
vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
if not os.path.isdir(pytorch_dump_folder_path):
logger.error(f"--pytorch_dump_folder_path ({pytorch_dump_folder_path}) should be a directory")
return
os.makedirs(pytorch_dump_folder_path, exist_ok=True)
vocab_dict = target_dict.indices
# fairseq has the <pad> and <s> switched
vocab_dict["<pad>"] = 0
vocab_dict["<s>"] = 1
with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
json.dump(vocab_dict, vocab_handle)
tokenizer = Wav2Vec2CTCTokenizer(
vocab_path,
unk_token=target_dict.unk_word,
pad_token=target_dict.pad_word,
bos_token=target_dict.bos_word,
eos_token=target_dict.eos_word,
word_delimiter_token="|",
do_lower_case=False,
)
return_attention_mask = config.feat_extract_norm == "layer"
feature_extractor = Wav2Vec2FeatureExtractor(
feature_size=1,
sampling_rate=16000,
padding_value=0,
do_normalize=True,
return_attention_mask=return_attention_mask,
)
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
processor.save_pretrained(pytorch_dump_folder_path)
hf_wav2vec = Wav2Vec2ForCTC(config)
else:
hf_wav2vec = Wav2Vec2ForPreTraining(config)
if is_finetuned or is_seq_class:
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
)
else:
task_arg = argparse.Namespace(task="audio_pretraining")
task = fairseq.tasks.setup_task(task_arg)
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
model = model[0].eval()
recursively_load_weights(model, hf_wav2vec, not is_finetuned)
hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
args = parser.parse_args()
is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
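# Example invocation for a fine-tuned CTC checkpoint (all paths are illustrative):
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h-converted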
| transformers/src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 6985
} | 549 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Wav2Vec2Conformer checkpoint."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
Wav2Vec2ConformerConfig,
Wav2Vec2ConformerForCTC,
Wav2Vec2ConformerForPreTraining,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
for attribute in key.split("."):
hf_pointer = getattr(hf_pointer, attribute)
if weight_type is not None:
hf_shape = getattr(hf_pointer, weight_type).shape
else:
hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
elif weight_type == "running_mean":
hf_pointer.running_mean.data = value
elif weight_type == "running_var":
hf_pointer.running_var.data = value
elif weight_type == "num_batches_tracked":
hf_pointer.num_batches_tracked.data = value
elif weight_type == "inv_freq":
hf_pointer.inv_freq.data = value
else:
hf_pointer.data = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name,
value,
feature_extractor,
unused_weights,
hf_model.config.feat_extract_norm == "group",
)
is_used = True
else:
for key, mapped_key in MAPPING.items():
mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key)[0].split(".")[-2]
mapped_key = mapped_key.replace("*", layer_index)
if "pos_bias_u" in name:
weight_type = None
elif "pos_bias_v" in name:
weight_type = None
elif "weight_g" in name:
weight_type = "weight_g"
elif "weight_v" in name:
weight_type = "weight_v"
elif "bias" in name:
weight_type = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
weight_type = "weight"
elif "running_mean" in name:
weight_type = "running_mean"
elif "inv_freq" in name:
weight_type = "inv_freq"
elif "running_var" in name:
weight_type = "running_var"
elif "num_batches_tracked" in name:
weight_type = "num_batches_tracked"
else:
weight_type = None
set_recursively(hf_model, mapped_key, value, name, weight_type)
continue
if not is_used:
unused_weights.append(name)
logger.warning(f"Unused weights: {unused_weights}")
# Copied from transformers.models.wav2vec2.convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.load_conv_layer
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
name = full_name.split("conv_layers.")[-1]
items = name.split(".")
layer_id = int(items[0])
type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
"""
Copy/paste/tweak model's weights to transformers design.
"""
if config_path is not None:
config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
else:
config = Wav2Vec2ConformerConfig()
if "rope" in checkpoint_path:
config.position_embeddings_type = "rotary"
if is_finetuned:
if dict_path:
target_dict = Dictionary.load(dict_path)
            # important: swap the bos & pad token ids, since the CTC blank symbol is <pad>
            # in the converted model and not <s> as in fairseq
config.bos_token_id = target_dict.pad_index
config.pad_token_id = target_dict.bos_index
config.eos_token_id = target_dict.eos_index
config.vocab_size = len(target_dict.symbols)
vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
if not os.path.isdir(pytorch_dump_folder_path):
logger.error(f"--pytorch_dump_folder_path ({pytorch_dump_folder_path}) should be a directory")
return
os.makedirs(pytorch_dump_folder_path, exist_ok=True)
vocab_dict = target_dict.indices
# fairseq has the <pad> and <s> switched
vocab_dict["<pad>"] = 0
vocab_dict["<s>"] = 1
with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
json.dump(vocab_dict, vocab_handle)
tokenizer = Wav2Vec2CTCTokenizer(
vocab_path,
unk_token=target_dict.unk_word,
pad_token=target_dict.pad_word,
bos_token=target_dict.bos_word,
eos_token=target_dict.eos_word,
word_delimiter_token="|",
do_lower_case=False,
)
return_attention_mask = config.feat_extract_norm == "layer"
feature_extractor = Wav2Vec2FeatureExtractor(
feature_size=1,
sampling_rate=16000,
padding_value=0,
do_normalize=True,
return_attention_mask=return_attention_mask,
)
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
processor.save_pretrained(pytorch_dump_folder_path)
hf_wav2vec = Wav2Vec2ConformerForCTC(config)
else:
hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
if is_finetuned:
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
)
else:
task_arg = argparse.Namespace(task="audio_pretraining")
task = fairseq.tasks.setup_task(task_arg)
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
model = model[0].eval()
recursively_load_weights(model, hf_wav2vec, not is_finetuned)
hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_wav2vec2_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| transformers/src/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 6281
} | 550 |
# Copyright 2022 The OpenAI team and The HuggingFace Team. All rights reserved.
# Most of the code is copy pasted from the original whisper repository
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unicodedata
from collections.abc import Iterator
from fractions import Fraction
from re import Match
from typing import Optional, Union
import regex
# non-ASCII letters that are not separated by "NFKD" normalization
ADDITIONAL_DIACRITICS = {
"œ": "oe",
"Œ": "OE",
"ø": "o",
"Ø": "O",
"æ": "ae",
"Æ": "AE",
"ß": "ss",
"ẞ": "SS",
"đ": "d",
"Đ": "D",
"ð": "d",
"Ð": "D",
"þ": "th",
"Þ": "th",
"ł": "l",
"Ł": "L",
}
def remove_symbols_and_diacritics(s: str, keep=""):
"""
    Replace any other markers, symbols, and punctuation with a space, and drop any diacritics (category 'Mn' and
    some manual mappings).
"""
def replace_character(char):
if char in keep:
return char
elif char in ADDITIONAL_DIACRITICS:
return ADDITIONAL_DIACRITICS[char]
elif unicodedata.category(char) == "Mn":
return ""
elif unicodedata.category(char)[0] in "MSP":
return " "
return char
return "".join(replace_character(c) for c in unicodedata.normalize("NFKD", s))
def remove_symbols(s: str):
"""
    Replace any other markers, symbols, and punctuation with a space, keeping diacritics.
"""
return "".join(" " if unicodedata.category(c)[0] in "MSP" else c for c in unicodedata.normalize("NFKC", s))
class BasicTextNormalizer:
def __init__(self, remove_diacritics: bool = False, split_letters: bool = False):
self.clean = remove_symbols_and_diacritics if remove_diacritics else remove_symbols
self.split_letters = split_letters
def __call__(self, s: str):
s = s.lower()
s = re.sub(r"[<\[][^>\]]*[>\]]", "", s) # remove words between brackets
s = re.sub(r"\(([^)]+?)\)", "", s) # remove words between parenthesis
s = self.clean(s).lower()
if self.split_letters:
s = " ".join(regex.findall(r"\X", s, regex.U))
s = re.sub(r"\s+", " ", s) # replace any successive whitespace characters with a space
return s
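# Illustrative usage (not part of the original file):
#   BasicTextNormalizer(remove_diacritics=True)("Héllo, [noise] Wörld!")
# lowercases the text, strips the bracketed span, removes symbols and diacritics, and collapses
# whitespace, yielding roughly "hello world" (up to leading/trailing spaces).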
class EnglishNumberNormalizer:
"""
Convert any spelled-out numbers into arabic numbers, while handling:
- remove any commas
- keep the suffixes such as: `1960s`, `274th`, `32nd`, etc.
- spell out currency symbols after the number. e.g. `$20 million` -> `20000000 dollars`
- spell out `one` and `ones`
- interpret successive single-digit numbers as nominal: `one oh one` -> `101`
"""
def __init__(self):
super().__init__()
self.zeros = {"o", "oh", "zero"}
# fmt: off
self.ones = {
name: i
for i, name in enumerate(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"],
start=1,
)
}
# fmt: on
self.ones_plural = {
"sixes" if name == "six" else name + "s": (value, "s") for name, value in self.ones.items()
}
self.ones_ordinal = {
"zeroth": (0, "th"),
"first": (1, "st"),
"second": (2, "nd"),
"third": (3, "rd"),
"fifth": (5, "th"),
"twelfth": (12, "th"),
**{
name + ("h" if name.endswith("t") else "th"): (value, "th")
for name, value in self.ones.items()
if value > 3 and value != 5 and value != 12
},
}
self.ones_suffixed = {**self.ones_plural, **self.ones_ordinal}
self.tens = {
"twenty": 20,
"thirty": 30,
"forty": 40,
"fifty": 50,
"sixty": 60,
"seventy": 70,
"eighty": 80,
"ninety": 90,
}
self.tens_plural = {name.replace("y", "ies"): (value, "s") for name, value in self.tens.items()}
self.tens_ordinal = {name.replace("y", "ieth"): (value, "th") for name, value in self.tens.items()}
self.tens_suffixed = {**self.tens_plural, **self.tens_ordinal}
self.multipliers = {
"hundred": 100,
"thousand": 1_000,
"million": 1_000_000,
"billion": 1_000_000_000,
"trillion": 1_000_000_000_000,
"quadrillion": 1_000_000_000_000_000,
"quintillion": 1_000_000_000_000_000_000,
"sextillion": 1_000_000_000_000_000_000_000,
"septillion": 1_000_000_000_000_000_000_000_000,
"octillion": 1_000_000_000_000_000_000_000_000_000,
"nonillion": 1_000_000_000_000_000_000_000_000_000_000,
"decillion": 1_000_000_000_000_000_000_000_000_000_000_000,
}
self.multipliers_plural = {name + "s": (value, "s") for name, value in self.multipliers.items()}
self.multipliers_ordinal = {name + "th": (value, "th") for name, value in self.multipliers.items()}
self.multipliers_suffixed = {**self.multipliers_plural, **self.multipliers_ordinal}
self.decimals = {*self.ones, *self.tens, *self.zeros}
self.preceding_prefixers = {
"minus": "-",
"negative": "-",
"plus": "+",
"positive": "+",
}
self.following_prefixers = {
"pound": "£",
"pounds": "£",
"euro": "€",
"euros": "€",
"dollar": "$",
"dollars": "$",
"cent": "¢",
"cents": "¢",
}
self.prefixes = set(list(self.preceding_prefixers.values()) + list(self.following_prefixers.values()))
self.suffixers = {
"per": {"cent": "%"},
"percent": "%",
}
self.specials = {"and", "double", "triple", "point"}
self.words = {
key
for mapping in [
self.zeros,
self.ones,
self.ones_suffixed,
self.tens,
self.tens_suffixed,
self.multipliers,
self.multipliers_suffixed,
self.preceding_prefixers,
self.following_prefixers,
self.suffixers,
self.specials,
]
for key in mapping
}
self.literal_words = {"one", "ones"}
def process_words(self, words: list[str]) -> Iterator[str]:
prefix: Optional[str] = None
value: Optional[Union[str, int]] = None
skip = False
def to_fraction(s: str):
try:
return Fraction(s)
except ValueError:
return None
def output(result: Union[str, int]):
nonlocal prefix, value
result = str(result)
if prefix is not None:
result = prefix + result
value = None
prefix = None
return result
if len(words) == 0:
return
for i, current in enumerate(words):
prev = words[i - 1] if i != 0 else None
next = words[i + 1] if i != len(words) - 1 else None
if skip:
skip = False
continue
next_is_numeric = next is not None and re.match(r"^\d+(\.\d+)?$", next)
has_prefix = current[0] in self.prefixes
current_without_prefix = current[1:] if has_prefix else current
if re.match(r"^\d+(\.\d+)?$", current_without_prefix):
# arabic numbers (potentially with signs and fractions)
f = to_fraction(current_without_prefix)
if f is None:
raise ValueError("Converting the fraction failed")
if value is not None:
if isinstance(value, str) and value.endswith("."):
# concatenate decimals / ip address components
value = str(value) + str(current)
continue
else:
yield output(value)
prefix = current[0] if has_prefix else prefix
if f.denominator == 1:
value = f.numerator # store integers as int
else:
value = current_without_prefix
elif current not in self.words:
# non-numeric words
if value is not None:
yield output(value)
yield output(current)
elif current in self.zeros:
value = str(value or "") + "0"
elif current in self.ones:
ones = self.ones[current]
if value is None:
value = ones
elif isinstance(value, str) or prev in self.ones:
if prev in self.tens and ones < 10: # replace the last zero with the digit
value = value[:-1] + str(ones)
else:
value = str(value) + str(ones)
elif ones < 10:
if value % 10 == 0:
value += ones
else:
value = str(value) + str(ones)
else: # eleven to nineteen
if value % 100 == 0:
value += ones
else:
value = str(value) + str(ones)
elif current in self.ones_suffixed:
# ordinal or cardinal; yield the number right away
ones, suffix = self.ones_suffixed[current]
if value is None:
yield output(str(ones) + suffix)
elif isinstance(value, str) or prev in self.ones:
if prev in self.tens and ones < 10:
yield output(value[:-1] + str(ones) + suffix)
else:
yield output(str(value) + str(ones) + suffix)
elif ones < 10:
if value % 10 == 0:
yield output(str(value + ones) + suffix)
else:
yield output(str(value) + str(ones) + suffix)
else: # eleven to nineteen
if value % 100 == 0:
yield output(str(value + ones) + suffix)
else:
yield output(str(value) + str(ones) + suffix)
value = None
elif current in self.tens:
tens = self.tens[current]
if value is None:
value = tens
elif isinstance(value, str):
value = str(value) + str(tens)
else:
if value % 100 == 0:
value += tens
else:
value = str(value) + str(tens)
elif current in self.tens_suffixed:
# ordinal or cardinal; yield the number right away
tens, suffix = self.tens_suffixed[current]
if value is None:
yield output(str(tens) + suffix)
elif isinstance(value, str):
yield output(str(value) + str(tens) + suffix)
else:
if value % 100 == 0:
yield output(str(value + tens) + suffix)
else:
yield output(str(value) + str(tens) + suffix)
elif current in self.multipliers:
multiplier = self.multipliers[current]
if value is None:
value = multiplier
elif isinstance(value, str) or value == 0:
f = to_fraction(value)
p = f * multiplier if f is not None else None
if f is not None and p.denominator == 1:
value = p.numerator
else:
yield output(value)
value = multiplier
else:
before = value // 1000 * 1000
residual = value % 1000
value = before + residual * multiplier
elif current in self.multipliers_suffixed:
multiplier, suffix = self.multipliers_suffixed[current]
if value is None:
yield output(str(multiplier) + suffix)
elif isinstance(value, str):
f = to_fraction(value)
p = f * multiplier if f is not None else None
if f is not None and p.denominator == 1:
yield output(str(p.numerator) + suffix)
else:
yield output(value)
yield output(str(multiplier) + suffix)
else: # int
before = value // 1000 * 1000
residual = value % 1000
value = before + residual * multiplier
yield output(str(value) + suffix)
value = None
elif current in self.preceding_prefixers:
# apply prefix (positive, minus, etc.) if it precedes a number
if value is not None:
yield output(value)
if next in self.words or next_is_numeric:
prefix = self.preceding_prefixers[current]
else:
yield output(current)
elif current in self.following_prefixers:
# apply prefix (dollars, cents, etc.) only after a number
if value is not None:
prefix = self.following_prefixers[current]
yield output(value)
else:
yield output(current)
elif current in self.suffixers:
# apply suffix symbols (percent -> '%')
if value is not None:
suffix = self.suffixers[current]
if isinstance(suffix, dict):
if next in suffix:
yield output(str(value) + suffix[next])
skip = True
else:
yield output(value)
yield output(current)
else:
yield output(str(value) + suffix)
else:
yield output(current)
elif current in self.specials:
if next not in self.words and not next_is_numeric:
# apply special handling only if the next word can be numeric
if value is not None:
yield output(value)
yield output(current)
elif current == "and":
# ignore "and" after hundreds, thousands, etc.
if prev not in self.multipliers:
if value is not None:
yield output(value)
yield output(current)
elif current == "double" or current == "triple":
if next in self.ones or next in self.zeros:
repeats = 2 if current == "double" else 3
ones = self.ones.get(next, 0)
value = str(value or "") + str(ones) * repeats
skip = True
else:
if value is not None:
yield output(value)
yield output(current)
elif current == "point":
if next in self.decimals or next_is_numeric:
value = str(value or "") + "."
else:
# should all have been covered at this point
raise ValueError(f"Unexpected token: {current}")
else:
# all should have been covered at this point
raise ValueError(f"Unexpected token: {current}")
if value is not None:
yield output(value)
def preprocess(self, s: str):
# replace "<number> and a half" with "<number> point five"
results = []
segments = re.split(r"\band\s+a\s+half\b", s)
for i, segment in enumerate(segments):
if len(segment.strip()) == 0:
continue
if i == len(segments) - 1:
results.append(segment)
else:
results.append(segment)
last_word = segment.rsplit(maxsplit=2)[-1]
if last_word in self.decimals or last_word in self.multipliers:
results.append("point five")
else:
results.append("and a half")
s = " ".join(results)
# put a space at number/letter boundary
s = re.sub(r"([a-z])([0-9])", r"\1 \2", s)
s = re.sub(r"([0-9])([a-z])", r"\1 \2", s)
        # but remove the space before ordinal/plural suffixes, e.g. "1 st" -> "1st"
s = re.sub(r"([0-9])\s+(st|nd|rd|th|s)\b", r"\1\2", s)
return s
def postprocess(self, s: str):
def combine_cents(m: Match):
try:
currency = m.group(1)
integer = m.group(2)
cents = int(m.group(3))
return f"{currency}{integer}.{cents:02d}"
except ValueError:
return m.string
def extract_cents(m: Match):
try:
return f"¢{int(m.group(1))}"
except ValueError:
return m.string
# apply currency postprocessing; "$2 and ¢7" -> "$2.07"
s = re.sub(r"([€£$])([0-9]+) (?:and )?¢([0-9]{1,2})\b", combine_cents, s)
s = re.sub(r"[€£$]0.([0-9]{1,2})\b", extract_cents, s)
# write "one(s)" instead of "1(s)", just for the readability
s = re.sub(r"\b1(s?)\b", r"one\1", s)
return s
def __call__(self, s: str):
s = self.preprocess(s)
s = " ".join(word for word in self.process_words(s.split()) if word is not None)
s = self.postprocess(s)
return s
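# Illustrative behaviour (not part of the original file):
#   normalizer = EnglishNumberNormalizer()
#   normalizer("twenty one dollars and fifty cents")  # -> "$21.50"
#   normalizer("two and a half miles")                # -> "2.5 miles"
#   normalizer("one oh one")                          # -> "101"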
class EnglishSpellingNormalizer:
"""
Applies British-American spelling mappings as listed in [1].
[1] https://www.tysto.com/uk-us-spelling-list.html
"""
def __init__(self, english_spelling_mapping):
self.mapping = english_spelling_mapping
def __call__(self, s: str):
return " ".join(self.mapping.get(word, word) for word in s.split())
class EnglishTextNormalizer:
def __init__(self, english_spelling_mapping):
self.ignore_patterns = r"\b(hmm|mm|mhm|mmm|uh|um)\b"
self.replacers = {
# common contractions
r"\bwon't\b": "will not",
r"\bcan't\b": "can not",
r"\blet's\b": "let us",
r"\bain't\b": "aint",
r"\by'all\b": "you all",
r"\bwanna\b": "want to",
r"\bgotta\b": "got to",
r"\bgonna\b": "going to",
r"\bi'ma\b": "i am going to",
r"\bimma\b": "i am going to",
r"\bwoulda\b": "would have",
r"\bcoulda\b": "could have",
r"\bshoulda\b": "should have",
r"\bma'am\b": "madam",
            # abbreviations of titles/prefixes
r"\bmr\b": "mister ",
r"\bmrs\b": "missus ",
r"\bst\b": "saint ",
r"\bdr\b": "doctor ",
r"\bprof\b": "professor ",
r"\bcapt\b": "captain ",
r"\bgov\b": "governor ",
r"\bald\b": "alderman ",
r"\bgen\b": "general ",
r"\bsen\b": "senator ",
r"\brep\b": "representative ",
r"\bpres\b": "president ",
r"\brev\b": "reverend ",
r"\bhon\b": "honorable ",
r"\basst\b": "assistant ",
r"\bassoc\b": "associate ",
r"\blt\b": "lieutenant ",
r"\bcol\b": "colonel ",
r"\bjr\b": "junior ",
r"\bsr\b": "senior ",
r"\besq\b": "esquire ",
            # perfect tenses; ideally this should cover any past participle, but that's harder
r"'d been\b": " had been",
r"'s been\b": " has been",
r"'d gone\b": " had gone",
r"'s gone\b": " has gone",
r"'d done\b": " had done", # "'s done" is ambiguous
r"'s got\b": " has got",
# general contractions
r"n't\b": " not",
r"'re\b": " are",
r"'s\b": " is",
r"'d\b": " would",
r"'ll\b": " will",
r"'t\b": " not",
r"'ve\b": " have",
r"'m\b": " am",
}
self.standardize_numbers = EnglishNumberNormalizer()
self.standardize_spellings = EnglishSpellingNormalizer(english_spelling_mapping)
def __call__(self, s: str):
s = s.lower()
s = re.sub(r"[<\[][^>\]]*[>\]]", "", s) # remove words between brackets
s = re.sub(r"\(([^)]+?)\)", "", s) # remove words between parenthesis
s = re.sub(self.ignore_patterns, "", s)
s = re.sub(r"\s+'", "'", s) # standardize when there's a space before an apostrophe
for pattern, replacement in self.replacers.items():
s = re.sub(pattern, replacement, s)
s = re.sub(r"(\d),(\d)", r"\1\2", s) # remove commas between digits
s = re.sub(r"\.([^0-9]|$)", r" \1", s) # remove periods not followed by numbers
s = remove_symbols_and_diacritics(s, keep=".%$¢€£") # keep some symbols for numerics
s = self.standardize_numbers(s)
s = self.standardize_spellings(s)
# now remove prefix/suffix symbols that are not preceded/followed by numbers
s = re.sub(r"[.$¢€£]([^0-9])", r" \1", s)
s = re.sub(r"([^0-9])%", r"\1 ", s)
s = re.sub(r"\s+", " ", s) # replace any successive whitespace characters with a space
return s
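# Illustrative usage (the spelling mapping below is a hypothetical stand-in for a real
# British-to-American mapping; not part of the original file):
#   normalizer = EnglishTextNormalizer({"colour": "color"})
#   normalizer("Mr. Smith's colour TV cost twenty dollars!")
# expands "mr" to "mister" and "'s" to " is", strips punctuation, converts the spelled-out amount
# to "$20" and applies the spelling mapping, giving roughly "mister smith is color tv cost $20".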
| transformers/src/transformers/models/whisper/english_normalizer.py/0 | {
"file_path": "transformers/src/transformers/models/whisper/english_normalizer.py",
"repo_id": "transformers",
"token_count": 12170
} | 551 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
import re
import torch
import yaml
from transformers import (
AutoConfig,
DacFeatureExtractor,
XcodecConfig,
XcodecModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
torch.serialization.add_safe_globals([io.BytesIO])
MAPPING_ACOUSTIC_ENCODER = {
r"^block\.0": ["conv1"],
r"^block\.(\d+)\.block\.(\d+)\.block\.0": ["block", "res_unit", "snake1"],
r"^block\.(\d+)\.block\.(\d+)\.block\.1": ["block", "res_unit", "conv1"],
r"^block\.(\d+)\.block\.(\d+)\.block\.2": ["block", "res_unit", "snake2"],
r"^block\.(\d+)\.block\.(\d+)\.block\.3": ["block", "res_unit", "conv2"],
r"^block\.(\d+)\.block\.3": ["block", "snake1"],
r"^block\.(\d+)\.block\.4": ["block", "conv1"],
r"^block\.5": ["snake1"],
r"^block\.6": ["conv2"],
}
MAPPING_ACOUSTIC_DECODER = {
r"^model\.0": ["conv1"],
r"^model\.(\d+)\.block\.0": ["block", "snake1"],
r"^model\.(\d+)\.block\.1": ["block", "conv_t1"],
r"^model\.(\d+)\.block\.(\d+)\.block\.0": ["block", "res_unit", "snake1"],
r"^model\.(\d+)\.block\.(\d+)\.block\.1": ["block", "res_unit", "conv1"],
r"^model\.(\d+)\.block\.(\d+)\.block\.2": ["block", "res_unit", "snake2"],
r"^model\.(\d+)\.block\.(\d+)\.block\.3": ["block", "res_unit", "conv2"],
r"^model\.5": ["snake1"],
r"^model\.6": ["conv2"],
}
MAPPING_SEMANTIC_ENCODER = {
"conv.conv.": "conv.",
"conv1.conv.": "conv1.",
"conv2.conv.": "conv2.",
}
MAPPING_SEMANTIC_DECODER = {
"conv1.conv.": "conv1.",
"conv2.conv.": "conv2.",
"conv.conv.": "conv.",
}
MAPPING_QUANTIZER = {
"quantizer.vq.layers": "quantizer.quantizers",
"._codebook.": ".codebook.",
}
def safe_load(path: str) -> dict[str, torch.Tensor]:
"""
Load only the tensor objects from a checkpoint, skipping any BytesIO
"""
shard = torch.load(path, map_location="cpu", weights_only=True)
return {k: v for k, v in shard.items() if not isinstance(v, io.BytesIO)}
def _rewrite_weight_norm(key: str) -> str:
if key.endswith("weight_g"):
return key[: -len("weight_g")] + "parametrizations.weight.original0"
if key.endswith("weight_v"):
return key[: -len("weight_v")] + "parametrizations.weight.original1"
return key
def convert_old_keys_to_new_keys(original_state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
converted_checkpoint: dict[str, torch.Tensor] = {}
for old_key, value in original_state_dict.items():
if old_key.startswith("encoder."):
layer_key = old_key[len("encoder.") :]
for pattern, path_parts in MAPPING_ACOUSTIC_ENCODER.items():
pattern_match = re.match(pattern, layer_key)
if pattern_match is None:
continue
digit_strings = [g for g in pattern_match.groups() if g is not None]
digit_indices = [int(ds) for ds in digit_strings]
remainder = layer_key[pattern_match.end() :]
if len(path_parts) == 1:
mapped_subkey = f"{path_parts[0]}{remainder}"
elif len(path_parts) == 2:
encoder_layer = digit_indices[0] - 1
mapped_subkey = f"{path_parts[0]}.{encoder_layer}.{path_parts[1]}{remainder}"
else:
encoder_layer, unit_idx = digit_indices
mapped_subkey = (
f"{path_parts[0]}.{encoder_layer - 1}.{path_parts[1]}{unit_idx + 1}.{path_parts[2]}{remainder}"
)
new_key = f"acoustic_encoder.{_rewrite_weight_norm(mapped_subkey)}"
converted_checkpoint[new_key] = value
break
elif old_key.startswith("decoder_2."):
layer_key = old_key[len("decoder_2.") :]
for pattern, path_parts in MAPPING_ACOUSTIC_DECODER.items():
pattern_match = re.match(pattern, layer_key)
if pattern_match is None:
continue
digit_strings = [g for g in pattern_match.groups() if g is not None]
digit_indices = [int(ds) for ds in digit_strings]
remainder = layer_key[pattern_match.end() :]
if len(path_parts) == 1:
mapped_subkey = f"{path_parts[0]}{remainder}"
elif len(path_parts) == 2:
decoder_layer = digit_indices[0] - 1
mapped_subkey = f"{path_parts[0]}.{decoder_layer}.{path_parts[1]}{remainder}"
else:
decoder_layer, unit_idx = digit_indices
mapped_subkey = (
f"{path_parts[0]}.{decoder_layer - 1}.{path_parts[1]}{unit_idx - 1}.{path_parts[2]}{remainder}"
)
new_key = f"acoustic_decoder.{_rewrite_weight_norm(mapped_subkey)}"
converted_checkpoint[new_key] = value
break
elif old_key.startswith("encoder_semantic."):
semantic_key = old_key[len("encoder_semantic.") :]
for old, new in MAPPING_SEMANTIC_ENCODER.items():
semantic_key = semantic_key.replace(old, new)
converted_checkpoint[f"encoder_semantic.{semantic_key}"] = value
elif old_key.startswith("decoder_semantic."):
semantic_key = old_key[len("decoder_semantic.") :]
for old, new in MAPPING_SEMANTIC_DECODER.items():
semantic_key = semantic_key.replace(old, new)
converted_checkpoint[f"decoder_semantic.{semantic_key}"] = value
elif old_key.startswith("semantic_model."):
converted_checkpoint[old_key] = value
elif old_key.startswith("fc_prior."):
converted_checkpoint[f"fc.{old_key[len('fc_prior.') :]}"] = value
elif old_key.startswith("fc_post1."):
converted_checkpoint[f"fc1.{old_key[len('fc_post1.') :]}"] = value
elif old_key.startswith("fc_post2."):
converted_checkpoint[f"fc2.{old_key[len('fc_post2.') :]}"] = value
elif old_key.startswith("quantizer.vq.layers"):
new_key = old_key
for old_sub, new_sub in MAPPING_QUANTIZER.items():
new_key = new_key.replace(old_sub, new_sub)
converted_checkpoint[new_key] = value
return converted_checkpoint
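# Illustrative example (hypothetical key, not part of the original script): an original key such as
# "encoder.block.2.block.1.block.0.alpha" matches the MAPPING_ACOUSTIC_ENCODER pattern
# r"^block\.(\d+)\.block\.(\d+)\.block\.0" and is rewritten to
# "acoustic_encoder.block.1.res_unit2.snake1.alpha" (block index shifted down by one, residual-unit
# index shifted up by one, and weight-norm suffixes rewritten by _rewrite_weight_norm when present).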
# for reference, see original implementation: https://github.com/zhenye234/xcodec/blob/main/models/soundstream_semantic.py#L24
@torch.no_grad()
def convert_checkpoint(checkpoint_path, config_path, pytorch_dump_folder_path=None, push_to_hub=None):
# load config yaml file
with open(config_path, "r") as f:
model_config = yaml.safe_load(f)
    # extract the relevant parameters
ratios = model_config["generator"]["config"]["ratios"]
target_bandwidths = model_config["generator"]["config"]["target_bandwidths"]
sample_rate = model_config["generator"]["config"]["sample_rate"]
acoustic_model_config = {
"encoder_hidden_size": 64,
"decoder_hidden_size": 1024,
# NOTE: original DAC uses [2, 4, 8, 8] `downsampling ratios`, namely reverse of `upsampling_ratios`
# (not sure if intentional by Xcodec but we keep it)
"downsampling_ratios": ratios,
"upsampling_ratios": ratios,
"sampling_rate": sample_rate,
"hidden_size": model_config["generator"]["config"]["D"],
}
semantic_model = model_config["generator"]["config"]["semantic_techer"]
if semantic_model == "hubert_base":
semantic_model_config = AutoConfig.from_pretrained("facebook/hubert-base-ls960")
elif semantic_model == "wavlm_base_plus":
semantic_model_config = AutoConfig.from_pretrained("microsoft/wavlm-base-plus")
elif semantic_model == "hubert_base_general":
semantic_model_config = AutoConfig.from_pretrained("ZhenYe234/hubert_base_general_audio")
else:
raise ValueError(f"Unknown semantic model: {semantic_model}")
config = XcodecConfig(
target_bandwidths=target_bandwidths,
acoustic_model_config=acoustic_model_config,
semantic_model_config=semantic_model_config,
sample_rate=sample_rate,
codebook_size=model_config["generator"]["config"]["bins"],
)
# create model
if not torch.cuda.is_available():
raise ValueError("Run this script on a machine with a GPU for weight norm layers to be correctly copied.")
torch_device = "cuda"
model = XcodecModel(config).to(torch_device)
logger.info("Loading original checkpoint ...")
state_dict = safe_load(checkpoint_path)
# the original checkpoint has weight norm applied
model.apply_weight_norm()
logger.info("Converting model ...")
new_state_dict = convert_old_keys_to_new_keys(state_dict)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=True, assign=True)
if len(unexpected_keys) != 0:
raise ValueError(f"Unexpected keys: {unexpected_keys}")
if len(missing_keys) != 0:
raise ValueError(f"missing keys found: {missing_keys}")
model.remove_weight_norm()
if pytorch_dump_folder_path is not None:
model.save_pretrained(pytorch_dump_folder_path)
feature_extractor = DacFeatureExtractor(
sampling_rate=config.sample_rate,
hop_length=config.acoustic_model_config.hop_length,
)
if pytorch_dump_folder_path is not None:
feature_extractor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("Pushing to the hub...")
feature_extractor.push_to_hub(push_to_hub)
model.push_to_hub(push_to_hub)
"""
Models checkpoints can be downloaded from here:
https://github.com/zhenye234/xcodec?tab=readme-ov-file#available-models
1) `xcodec_hubert_librispeech`:
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_hubert.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_speech_hubert_librispeech.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_speech_hubert_librispeech.pth \
--config_path /raid/eric/xcodec_original/config_hubert.yaml \
--push_to_hub hf-audio/xcodec-hubert-librispeech
```
2) `xcodec_hubert_general_audio`:
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_hubert_general.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_hubert_general_audio.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_hubert_general_audio.pth \
--config_path /raid/eric/xcodec_original/config_hubert_general.yaml \
--push_to_hub hf-audio/xcodec-hubert-general
```
3) `xcodec_hubert_general_audio_more_data` (more balanced dataset):
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_hubert_general.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_hubert_general_audio_v2.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_hubert_general_audio_v2.pth \
--config_path /raid/eric/xcodec_original/config_hubert_general.yaml \
--push_to_hub hf-audio/xcodec-hubert-general-balanced
```
4) `xcodec_wavlm_mls`:
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_wavlm.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_speech_wavlm_mls.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_speech_wavlm_mls.pth \
--config_path /raid/eric/xcodec_original/config_wavlm.yaml \
--push_to_hub hf-audio/xcodec-wavlm-mls
```
5) `xcodec_wavlm_more_data`:
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_wavlm.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_speech_wavlm_more_data.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_speech_wavlm_more_data.pth \
--config_path /raid/eric/xcodec_original/config_wavlm.yaml \
--push_to_hub hf-audio/xcodec-wavlm-more-data
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument(
"--config_path", required=True, default=None, type=str, help="Path to hf config.yaml of model to convert"
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
args = parser.parse_args()
convert_checkpoint(
args.checkpoint_path,
args.config_path,
args.pytorch_dump_folder_path,
args.push_to_hub,
)
| transformers/src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 6258
} | 552 |
from typing import Optional, Union
from transformers.models.detr.image_processing_detr_fast import DetrImageProcessorFast
from ...image_transforms import center_to_corners_format
from ...utils import (
TensorType,
is_torch_available,
logging,
)
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
def get_size_with_aspect_ratio(
image_size: tuple[int, int], size: int, max_size: Optional[int] = None, mod_size: int = 16
) -> tuple[int, int]:
"""
    Computes the output image size given the input image size and the desired output size, flooring each dimension to a multiple of `mod_size`.
Args:
image_size (`tuple[int, int]`):
The input image size.
size (`int`):
The desired output size.
max_size (`int`, *optional*):
The maximum allowed output size.
        mod_size (`int`, *optional*, defaults to 16):
            The value to floor each output dimension to a multiple of.
"""
height, width = image_size
raw_size = None
if max_size is not None:
min_original_size = float(min((height, width)))
max_original_size = float(max((height, width)))
if max_original_size / min_original_size * size > max_size:
raw_size = max_size * min_original_size / max_original_size
size = int(round(raw_size))
if width < height:
ow = size
if max_size is not None and raw_size is not None:
oh = int(raw_size * height / width)
else:
oh = int(size * height / width)
elif (height <= width and height == size) or (width <= height and width == size):
oh, ow = height, width
else:
oh = size
if max_size is not None and raw_size is not None:
ow = int(raw_size * width / height)
else:
ow = int(size * width / height)
if mod_size is not None:
ow_mod = torch.remainder(torch.tensor(ow), mod_size).item()
oh_mod = torch.remainder(torch.tensor(oh), mod_size).item()
ow = ow - ow_mod
oh = oh - oh_mod
return (oh, ow)
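# Illustrative example (not part of the original file): for an input of (height, width) = (480, 640)
# with size=800, max_size=1333 and mod_size=16, the shorter side is scaled to 800 and the longer side
# to int(800 * 640 / 480) = 1066; flooring both to a multiple of 16 gives (800, 1056).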
class YolosImageProcessorFast(DetrImageProcessorFast):
def post_process(self, outputs, target_sizes):
"""
Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`YolosObjectDetectionOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
original image size (before any data augmentation). For visualization, this should be the image size
after data augment, but before padding.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
logger.warning_once(
"`post_process` is deprecated and will be removed in v5 of Transformers, please use"
" `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
)
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if len(out_logits) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
if target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
return results
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, list[tuple]] = None, top_k: int = 100
):
"""
Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`YolosObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
top_k (`int`, *optional*, defaults to 100):
Keep only top k bounding boxes before filtering by thresholding.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = out_logits.sigmoid()
prob = prob.view(out_logits.shape[0], -1)
k_value = min(top_k, prob.size(1))
topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
    def post_process_segmentation():
        raise NotImplementedError("Segmentation post-processing is not implemented for YOLOS yet.")
    def post_process_instance():
        raise NotImplementedError("Instance post-processing is not implemented for YOLOS yet.")
    def post_process_panoptic():
        raise NotImplementedError("Panoptic post-processing is not implemented for YOLOS yet.")
    def post_process_instance_segmentation():
        raise NotImplementedError("Instance segmentation post-processing is not implemented for YOLOS yet.")
    def post_process_semantic_segmentation():
        raise NotImplementedError("Semantic segmentation post-processing is not implemented for YOLOS yet.")
    def post_process_panoptic_segmentation():
        raise NotImplementedError("Panoptic segmentation post-processing is not implemented for YOLOS yet.")
__all__ = ["YolosImageProcessorFast"]
| transformers/src/transformers/models/yolos/modular_yolos.py/0 | {
"file_path": "transformers/src/transformers/models/yolos/modular_yolos.py",
"repo_id": "transformers",
"token_count": 3467
} | 553 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for ZoeDepth."""
from typing import (
Optional,
Union,
)
import numpy as np
from ...image_processing_utils import (
BatchFeature,
)
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
get_image_size,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
logging,
requires_backends,
)
from .image_processing_zoedepth import get_resize_output_image_size
from .modeling_zoedepth import ZoeDepthDepthEstimatorOutput
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
from torchvision.transforms import InterpolationMode
logger = logging.get_logger(__name__)
class ZoeDepthFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
"""
do_pad (`bool`, *optional*, defaults to `True`):
        Whether to pad the input.
keep_aspect_ratio (`bool`, *optional*, defaults to `True`):
If `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it
for both dimensions. This ensures that the image is scaled down as little as possible while still fitting
within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a
size that is a multiple of this value by flooring the height and width to the nearest multiple of this value.
Can be overridden by `keep_aspect_ratio` in `preprocess`.
ensure_multiple_of (`int`, *optional*, defaults to 32):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring
the height and width to the nearest multiple of this value.
Works both with and without `keep_aspect_ratio` being set to `True`.
Can be overridden by `ensure_multiple_of` in `preprocess`.
"""
do_pad: Optional[bool]
keep_aspect_ratio: Optional[bool]
ensure_multiple_of: Optional[int]
@auto_docstring
class ZoeDepthImageProcessorFast(BaseImageProcessorFast):
do_pad = True
do_rescale = True
do_normalize = True
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
do_resize = True
size = {"height": 384, "width": 512}
resample = PILImageResampling.BILINEAR
keep_aspect_ratio = True
    ensure_multiple_of = 32
valid_kwargs = ZoeDepthFastImageProcessorKwargs
def __init__(self, **kwargs: Unpack[ZoeDepthFastImageProcessorKwargs]) -> None:
super().__init__(**kwargs)
@auto_docstring
def preprocess(
self,
images: ImageInput,
**kwargs: Unpack[ZoeDepthFastImageProcessorKwargs],
) -> BatchFeature:
return super().preprocess(images, **kwargs)
def resize(
self,
images: "torch.Tensor",
size: SizeDict,
keep_aspect_ratio: bool = False,
ensure_multiple_of: int = 1,
interpolation: Optional["F.InterpolationMode"] = None,
) -> "torch.Tensor":
"""
        Resize an image or a batch of images to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
set, the image is resized to a size that is a multiple of this value.
Args:
images (`torch.Tensor`):
Images to resize.
size (`dict[str, int]`):
Target size of the output image.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
ensure_multiple_of (`int`, *optional*, defaults to 1):
The image is resized to a size that is a multiple of this value.
interpolation (`F.InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                The resampling filter to use when resizing the image.
"""
if not size.height or not size.width:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size}")
output_size = get_resize_output_image_size(
images,
output_size=(size.height, size.width),
keep_aspect_ratio=keep_aspect_ratio,
multiple=ensure_multiple_of,
input_data_format=ChannelDimension.FIRST,
)
height, width = output_size
resized_images = torch.nn.functional.interpolate(
images, (int(height), int(width)), mode=interpolation.value, align_corners=True
)
return resized_images
def _pad_images(
self,
images: "torch.Tensor",
):
"""
Args:
image (`torch.Tensor`):
Image to pad.
"""
height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST)
pad_height = int(np.sqrt(height / 2) * 3)
pad_width = int(np.sqrt(width / 2) * 3)
return F.pad(images, padding=(pad_width, pad_height), padding_mode="reflect")
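    # Illustrative example (not part of the original file): for a 384 x 512 (height x width) image,
    # pad_height = int(sqrt(384 / 2) * 3) = 41 and pad_width = int(sqrt(512 / 2) * 3) = 48, i.e. the
    # image is reflection-padded with padding=(48, 41).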
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
keep_aspect_ratio: Optional[bool],
ensure_multiple_of: Optional[int],
interpolation: Optional["F.InterpolationMode"],
do_pad: bool,
do_rescale: bool,
rescale_factor: Optional[float],
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchFeature:
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_rescale:
stacked_images = self.rescale(stacked_images, rescale_factor)
if do_pad:
stacked_images = self._pad_images(images=stacked_images)
if do_resize:
stacked_images = self.resize(
stacked_images, size, keep_aspect_ratio, ensure_multiple_of, interpolation
)
if do_normalize:
stacked_images = self.normalize(stacked_images, image_mean, image_std)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
processed_images = torch.stack(resized_images, dim=0) if return_tensors else resized_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def post_process_depth_estimation(
self,
outputs: "ZoeDepthDepthEstimatorOutput",
source_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
outputs_flipped: Optional[Union["ZoeDepthDepthEstimatorOutput", None]] = None,
do_remove_padding: Optional[Union[bool, None]] = None,
) -> list[dict[str, TensorType]]:
"""
Converts the raw output of [`ZoeDepthDepthEstimatorOutput`] into final depth predictions and depth PIL images.
Only supports PyTorch.
Args:
outputs ([`ZoeDepthDepthEstimatorOutput`]):
Raw outputs of the model.
source_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the source size
(height, width) of each image in the batch before preprocessing. This argument should be dealt as
"required" unless the user passes `do_remove_padding=False` as input to this function.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
outputs_flipped ([`ZoeDepthDepthEstimatorOutput`], *optional*):
Raw outputs of the model from flipped input (averaged out in the end).
do_remove_padding (`bool`, *optional*):
By default ZoeDepth adds padding equal to `int(√(height / 2) * 3)` (and similarly for width) to fix the
                boundary artifacts in the output depth map, so we need to remove this padding during post-processing. The
parameter exists here in case the user changed the image preprocessing to not include padding.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
"""
requires_backends(self, "torch")
predicted_depth = outputs.predicted_depth
if (outputs_flipped is not None) and (predicted_depth.shape != outputs_flipped.predicted_depth.shape):
raise ValueError("Make sure that `outputs` and `outputs_flipped` have the same shape")
if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
)
if do_remove_padding is None:
do_remove_padding = self.do_pad
if source_sizes is None and do_remove_padding:
raise ValueError(
"Either `source_sizes` should be passed in, or `do_remove_padding` should be set to False"
)
if (source_sizes is not None) and (len(predicted_depth) != len(source_sizes)):
raise ValueError(
"Make sure that you pass in as many source image sizes as the batch dimension of the logits"
)
if outputs_flipped is not None:
predicted_depth = (predicted_depth + torch.flip(outputs_flipped.predicted_depth, dims=[-1])) / 2
predicted_depth = predicted_depth.unsqueeze(1)
# Zoe Depth model adds padding around the images to fix the boundary artifacts in the output depth map
# The padding length is `int(np.sqrt(img_h/2) * fh)` for the height and similar for the width
# fh (and fw respectively) are equal to '3' by default
# Check [here](https://github.com/isl-org/ZoeDepth/blob/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/depth_model.py#L57)
# for the original implementation.
# In this section, we remove this padding to get the final depth image and depth prediction
padding_factor_h = padding_factor_w = 3
results = []
target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
source_sizes = [None] * len(predicted_depth) if source_sizes is None else source_sizes
for depth, target_size, source_size in zip(predicted_depth, target_sizes, source_sizes):
# depth.shape = [1, H, W]
if source_size is not None:
pad_h = pad_w = 0
if do_remove_padding:
pad_h = int(np.sqrt(source_size[0] / 2) * padding_factor_h)
pad_w = int(np.sqrt(source_size[1] / 2) * padding_factor_w)
depth = F.resize(
depth,
size=[source_size[0] + 2 * pad_h, source_size[1] + 2 * pad_w],
interpolation=InterpolationMode.BICUBIC,
antialias=False,
)
if pad_h > 0:
depth = depth[:, pad_h:-pad_h, :]
if pad_w > 0:
depth = depth[:, :, pad_w:-pad_w]
if target_size is not None:
target_size = [target_size[0], target_size[1]]
depth = F.resize(
depth,
size=target_size,
interpolation=InterpolationMode.BICUBIC,
antialias=False,
)
depth = depth.squeeze(0)
# depth.shape = [H, W]
results.append({"predicted_depth": depth})
return results
__all__ = ["ZoeDepthImageProcessorFast"]
| transformers/src/transformers/models/zoedepth/image_processing_zoedepth_fast.py/0 | {
"file_path": "transformers/src/transformers/models/zoedepth/image_processing_zoedepth_fast.py",
"repo_id": "transformers",
"token_count": 5748
} | 554 |
# Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Any, Optional, Union, overload
import numpy as np
from ..generation import GenerationConfig
from ..utils import (
ExplicitEnum,
add_end_docstrings,
is_pytesseract_available,
is_torch_available,
is_vision_available,
logging,
)
from .base import ChunkPipeline, build_pipeline_init_args
from .question_answering import select_starts_ends
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES
TESSERACT_LOADED = False
if is_pytesseract_available():
TESSERACT_LOADED = True
import pytesseract
logger = logging.get_logger(__name__)
# normalize_box() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py.
# However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an
# unnecessary dependency.
def normalize_box(box, width, height):
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
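# Illustrative example (not part of the original file): a Tesseract box [100, 200, 300, 400] on a
# 1000 x 2000 (width x height) image is normalized to [100, 100, 300, 200] on the 0-1000 scale
# expected by LayoutLM-style models.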
def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]):
"""Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
# apply OCR
data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config)
words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
actual_boxes = []
for x, y, w, h in zip(left, top, width, height):
actual_box = [x, y, x + w, y + h]
actual_boxes.append(actual_box)
image_width, image_height = image.size
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box, image_width, image_height))
if len(words) != len(normalized_boxes):
raise ValueError("Not as many words as there are bounding boxes")
return words, normalized_boxes
class ModelType(ExplicitEnum):
LayoutLM = "layoutlm"
LayoutLMv2andv3 = "layoutlmv2andv3"
VisionEncoderDecoder = "vision_encoder_decoder"
@add_end_docstrings(build_pipeline_init_args(has_image_processor=True, has_tokenizer=True))
class DocumentQuestionAnsweringPipeline(ChunkPipeline):
# TODO: Update task_summary docs to include an example with document QA and then update the first sentence
"""
Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are
similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd
words/boxes) as input instead of text context.
Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
Example:
```python
>>> from transformers import pipeline
>>> document_qa = pipeline(model="impira/layoutlm-document-qa")
>>> document_qa(
... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
... question="What is the invoice number?",
... )
[{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This document question answering pipeline can currently be loaded from [`pipeline`] using the following task
identifier: `"document-question-answering"`.
The models that this pipeline can use are models that have been fine-tuned on a document question answering task.
See the up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=document-question-answering).
"""
_pipeline_calls_generate = True
_load_processor = False
_load_image_processor = None
_load_feature_extractor = None
_load_tokenizer = True
# Make sure the docstring is updated when the default generation config is changed
_default_generation_config = GenerationConfig(
max_new_tokens=256,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.tokenizer is not None and not self.tokenizer.__class__.__name__.endswith("Fast"):
raise ValueError(
"`DocumentQuestionAnsweringPipeline` requires a fast tokenizer, but a slow tokenizer "
f"(`{self.tokenizer.__class__.__name__}`) is provided."
)
if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig":
self.model_type = ModelType.VisionEncoderDecoder
if self.model.config.encoder.model_type != "donut-swin":
raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut")
else:
self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES)
if self.model.config.__class__.__name__ == "LayoutLMConfig":
self.model_type = ModelType.LayoutLM
else:
self.model_type = ModelType.LayoutLMv2andv3
def _sanitize_parameters(
self,
padding=None,
doc_stride=None,
max_question_len=None,
lang: Optional[str] = None,
tesseract_config: Optional[str] = None,
max_answer_len=None,
max_seq_len=None,
top_k=None,
handle_impossible_answer=None,
timeout=None,
**kwargs,
):
preprocess_params, postprocess_params = {}, {}
if padding is not None:
preprocess_params["padding"] = padding
if doc_stride is not None:
preprocess_params["doc_stride"] = doc_stride
if max_question_len is not None:
preprocess_params["max_question_len"] = max_question_len
if max_seq_len is not None:
preprocess_params["max_seq_len"] = max_seq_len
if lang is not None:
preprocess_params["lang"] = lang
if tesseract_config is not None:
preprocess_params["tesseract_config"] = tesseract_config
if timeout is not None:
preprocess_params["timeout"] = timeout
if top_k is not None:
if top_k < 1:
raise ValueError(f"top_k parameter should be >= 1 (got {top_k})")
postprocess_params["top_k"] = top_k
if max_answer_len is not None:
if max_answer_len < 1:
raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len})")
postprocess_params["max_answer_len"] = max_answer_len
if handle_impossible_answer is not None:
postprocess_params["handle_impossible_answer"] = handle_impossible_answer
forward_params = {}
if getattr(self, "assistant_model", None) is not None:
forward_params["assistant_model"] = self.assistant_model
if getattr(self, "assistant_tokenizer", None) is not None:
forward_params["tokenizer"] = self.tokenizer
forward_params["assistant_tokenizer"] = self.assistant_tokenizer
return preprocess_params, forward_params, postprocess_params
@overload
def __call__(
self,
image: Union["Image.Image", str],
question: str,
word_boxes: Optional[tuple[str, list[float]]] = None,
**kwargs: Any,
) -> list[dict[str, Any]]: ...
@overload
def __call__(self, image: dict[str, Any], **kwargs: Any) -> list[dict[str, Any]]: ...
@overload
def __call__(self, image: list[dict[str, Any]], **kwargs: Any) -> list[list[dict[str, Any]]]: ...
def __call__(
self,
image: Union["Image.Image", str, list[dict[str, Any]]],
question: Optional[str] = None,
word_boxes: Optional[tuple[str, list[float]]] = None,
**kwargs: Any,
) -> Union[dict[str, Any], list[dict[str, Any]]]:
"""
Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an
optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not
provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for
LayoutLM-like models which require them as input. For Donut, no OCR is run.
You can invoke the pipeline several ways:
- `pipeline(image=image, question=question)`
- `pipeline(image=image, question=question, word_boxes=word_boxes)`
- `pipeline([{"image": image, "question": question}])`
- `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])`
Args:
image (`str` or `PIL.Image`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. If given a single image, it can be
broadcasted to multiple questions.
question (`str`):
A question to ask of the document.
word_boxes (`list[str, tuple[float, float, float, float]]`, *optional*):
A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the
pipeline will use these words and boxes instead of running OCR on the image to derive them for models
that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the
pipeline without having to re-run it each time.
top_k (`int`, *optional*, defaults to 1):
The number of answers to return (will be chosen by order of likelihood). Note that we return fewer than
top_k answers if there are not enough options available within the context.
doc_stride (`int`, *optional*, defaults to 128):
If the words in the document are too long to fit with the question for the model, it will be split in
several chunks with some overlap. This argument controls the size of that overlap.
max_answer_len (`int`, *optional*, defaults to 15):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_seq_len (`int`, *optional*, defaults to 384):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
max_question_len (`int`, *optional*, defaults to 64):
The maximum length of the question after tokenization. It will be truncated if needed.
handle_impossible_answer (`bool`, *optional*, defaults to `False`):
Whether or not we accept impossible as an answer.
lang (`str`, *optional*):
Language to use while running OCR. Defaults to English.
tesseract_config (`str`, *optional*):
Additional flags to pass to tesseract while running OCR.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **score** (`float`) -- The probability associated to the answer.
- **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided
`word_boxes`).
- **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided
`word_boxes`).
- **answer** (`str`) -- The answer to the question.
- **words** (`list[int]`) -- The index of each word/box pair that is in the answer
"""
if isinstance(question, str):
inputs = {"question": question, "image": image}
if word_boxes is not None:
inputs["word_boxes"] = word_boxes
else:
inputs = image
return super().__call__(inputs, **kwargs)
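# Usage sketch, reusing the `document_qa` pipeline from the docstring example above (the
# boxes below are illustrative): passing precomputed `word_boxes` (normalized to 0-1000)
# lets you reuse OCR results across calls and skips the Tesseract step for LayoutLM-like models.
#   >>> word_boxes = [("Invoice", [50, 30, 210, 60]), ("us-001", [230, 30, 330, 60])]
#   >>> document_qa(image=image, question="What is the invoice number?", word_boxes=word_boxes)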
def preprocess(
self,
input,
padding="do_not_pad",
doc_stride=None,
max_seq_len=None,
word_boxes: Optional[tuple[str, list[float]]] = None,
lang=None,
tesseract_config="",
timeout=None,
):
# NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR
# to support documents with enough tokens that overflow the model's window
if max_seq_len is None:
max_seq_len = self.tokenizer.model_max_length
if doc_stride is None:
doc_stride = min(max_seq_len // 2, 256)
image = None
image_features = {}
if input.get("image", None) is not None:
image = load_image(input["image"], timeout=timeout)
if self.image_processor is not None:
image_inputs = self.image_processor(images=image, return_tensors=self.framework)
if self.framework == "pt":
image_inputs = image_inputs.to(self.dtype)
image_features.update(image_inputs)
elif self.feature_extractor is not None:
image_features.update(self.feature_extractor(images=image, return_tensors=self.framework))
elif self.model_type == ModelType.VisionEncoderDecoder:
raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor")
words, boxes = None, None
if self.model_type != ModelType.VisionEncoderDecoder:
if "word_boxes" in input:
words = [x[0] for x in input["word_boxes"]]
boxes = [x[1] for x in input["word_boxes"]]
elif "words" in image_features and "boxes" in image_features:
words = image_features.pop("words")[0]
boxes = image_features.pop("boxes")[0]
elif image is not None:
if not TESSERACT_LOADED:
raise ValueError(
"If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract,"
" but pytesseract is not available"
)
if TESSERACT_LOADED:
words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config)
else:
raise ValueError(
"You must provide an image or word_boxes. If you provide an image, the pipeline will automatically"
" run OCR to derive words and boxes"
)
if self.tokenizer.padding_side != "right":
raise ValueError(
"Document question answering only supports tokenizers whose padding side is 'right', not"
f" {self.tokenizer.padding_side}"
)
if self.model_type == ModelType.VisionEncoderDecoder:
task_prompt = f"<s_docvqa><s_question>{input['question']}</s_question><s_answer>"
# Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py
encoding = {
"inputs": image_features["pixel_values"],
"decoder_input_ids": self.tokenizer(
task_prompt, add_special_tokens=False, return_tensors=self.framework
).input_ids,
"return_dict_in_generate": True,
}
yield {
**encoding,
"p_mask": None,
"word_ids": None,
"words": None,
"output_attentions": True,
"is_last": True,
}
else:
tokenizer_kwargs = {}
if self.model_type == ModelType.LayoutLM:
tokenizer_kwargs["text"] = input["question"].split()
tokenizer_kwargs["text_pair"] = words
tokenizer_kwargs["is_split_into_words"] = True
else:
tokenizer_kwargs["text"] = [input["question"]]
tokenizer_kwargs["text_pair"] = [words]
tokenizer_kwargs["boxes"] = [boxes]
encoding = self.tokenizer(
padding=padding,
max_length=max_seq_len,
stride=doc_stride,
return_token_type_ids=True,
truncation="only_second",
return_overflowing_tokens=True,
**tokenizer_kwargs,
)
# TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs
# FIXME: ydshieh and/or Narsil
encoding.pop("overflow_to_sample_mapping", None) # We do not use this
num_spans = len(encoding["input_ids"])
# p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
# We put 0 on the tokens from the context and 1 everywhere else (question and special tokens)
# This logic mirrors the logic in the question_answering pipeline
p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)]
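# Worked example (sequence ids are illustrative): for a span whose sequence_ids are
# [None, 0, 0, None, 1, 1, None] (special tokens, question tokens, then context tokens),
# the comprehension above yields [True, True, True, True, False, False, True], i.e. only
# the two context tokens (value 1 in sequence_ids) are allowed to be part of the answer.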
for span_idx in range(num_spans):
if self.framework == "pt":
span_encoding = {k: torch.tensor(v[span_idx : span_idx + 1]) for (k, v) in encoding.items()}
if "pixel_values" in image_features:
span_encoding["image"] = image_features["pixel_values"]
else:
raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline")
input_ids_span_idx = encoding["input_ids"][span_idx]
# keep the cls_token unmasked (some models use it to indicate unanswerable questions)
if self.tokenizer.cls_token_id is not None:
cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0]
for cls_index in cls_indices:
p_mask[span_idx][cls_index] = 0
# For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000]
# for SEP tokens, and the word's bounding box for words in the original document.
if "boxes" not in tokenizer_kwargs:
bbox = []
for input_id, sequence_id, word_id in zip(
encoding.input_ids[span_idx],
encoding.sequence_ids(span_idx),
encoding.word_ids(span_idx),
):
if sequence_id == 1:
bbox.append(boxes[word_id])
elif input_id == self.tokenizer.sep_token_id:
bbox.append([1000] * 4)
else:
bbox.append([0] * 4)
if self.framework == "pt":
span_encoding["bbox"] = torch.tensor(bbox).unsqueeze(0)
elif self.framework == "tf":
raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline")
yield {
**span_encoding,
"p_mask": p_mask[span_idx],
"word_ids": encoding.word_ids(span_idx),
"words": words,
"is_last": span_idx == num_spans - 1,
}
def _forward(self, model_inputs, **generate_kwargs):
p_mask = model_inputs.pop("p_mask", None)
word_ids = model_inputs.pop("word_ids", None)
words = model_inputs.pop("words", None)
is_last = model_inputs.pop("is_last", False)
if self.model_type == ModelType.VisionEncoderDecoder:
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
model_outputs = self.model.generate(**model_inputs, **generate_kwargs)
else:
model_outputs = self.model(**model_inputs)
model_outputs = dict(model_outputs.items())
model_outputs["p_mask"] = p_mask
model_outputs["word_ids"] = word_ids
model_outputs["words"] = words
model_outputs["attention_mask"] = model_inputs.get("attention_mask", None)
model_outputs["is_last"] = is_last
return model_outputs
def postprocess(self, model_outputs, top_k=1, **kwargs):
if self.model_type == ModelType.VisionEncoderDecoder:
answers = [self.postprocess_encoder_decoder_single(o) for o in model_outputs]
else:
answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs)
answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k]
return answers
def postprocess_encoder_decoder_single(self, model_outputs, **kwargs):
sequence = self.tokenizer.batch_decode(model_outputs["sequences"])[0]
# TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer
# (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context).
sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "")
sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token
ret = {
"answer": None,
}
answer = re.search(r"<s_answer>(.*)</s_answer>", sequence)
if answer is not None:
ret["answer"] = answer.group(1).strip()
return ret
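# Illustrative walk-through (the decoded sequence is made up): given
# "<s_docvqa><s_question>What is the invoice number?</s_question><s_answer> us-001</s_answer>"
# after eos/pad removal, the first tag ("<s_docvqa>") is stripped by re.sub(..., count=1) and the
# <s_answer>...</s_answer> search extracts "us-001" as the final answer.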
def postprocess_extractive_qa(
self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs
):
min_null_score = 1000000 # large and positive
answers = []
for output in model_outputs:
words = output["words"]
if self.framework == "pt" and output["start_logits"].dtype in (torch.bfloat16, torch.float16):
output["start_logits"] = output["start_logits"].float()
if self.framework == "pt" and output["end_logits"].dtype in (torch.bfloat16, torch.float16):
output["end_logits"] = output["end_logits"].float()
starts, ends, scores, min_null_score = select_starts_ends(
start=output["start_logits"],
end=output["end_logits"],
p_mask=output["p_mask"],
attention_mask=output["attention_mask"].numpy()
if output.get("attention_mask", None) is not None
else None,
min_null_score=min_null_score,
top_k=top_k,
handle_impossible_answer=handle_impossible_answer,
max_answer_len=max_answer_len,
)
word_ids = output["word_ids"]
for start, end, score in zip(starts, ends, scores):
word_start, word_end = word_ids[start], word_ids[end]
if word_start is not None and word_end is not None:
answers.append(
{
"score": float(score),
"answer": " ".join(words[word_start : word_end + 1]),
"start": word_start,
"end": word_end,
}
)
if handle_impossible_answer:
answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0})
return answers
| transformers/src/transformers/pipelines/document_question_answering.py/0 | {
"file_path": "transformers/src/transformers/pipelines/document_question_answering.py",
"repo_id": "transformers",
"token_count": 11239
} | 555 |
import enum
import itertools
import types
from typing import Any, overload
from ..generation import GenerationConfig
from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available
from .base import Pipeline, build_pipeline_init_args
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from .pt_utils import KeyDataset
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
ChatType = list[dict[str, str]]
class ReturnType(enum.Enum):
TENSORS = 0
NEW_TEXT = 1
FULL_TEXT = 2
class Chat:
"""This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats
to this format because the rest of the pipeline code tends to assume that lists of messages are
actually a batch of samples rather than messages in the same conversation."""
def __init__(self, messages: dict):
for message in messages:
if not ("role" in message and "content" in message):
raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.")
self.messages = messages
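# Minimal sketch of the expected chat format (contents are illustrative):
#   >>> Chat([{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}])
# Any message missing a "role" or "content" key makes the constructor raise a ValueError.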
@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class TextGenerationPipeline(Pipeline):
"""
Language generation pipeline using any `ModelWithLMHead` or `ModelForCausalLM`. This pipeline predicts the words
that will follow a specified text prompt. When the underlying model is a conversational model, it can also accept
one or more chats, in which case the pipeline will operate in chat mode and will continue the chat(s) by adding
its response(s). Each chat takes the form of a list of dicts, where each dict contains "role" and "content" keys.
Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
- do_sample: True
- temperature: 0.7
Examples:
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="openai-community/gpt2")
>>> generator("I can't believe you did such a ", do_sample=False)
[{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]
>>> # These parameters return several suggestions and only the newly generated text, making the outputs easier to use as prompt suggestions.
>>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
```
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta")
>>> # Zephyr-beta is a conversational model, so let's pass it a chat instead of a single string
>>> generator([{"role": "user", "content": "What is the capital of France? Answer in one word."}], do_sample=False, max_new_tokens=2)
[{'generated_text': [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'Paris'}]}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
text generation parameters in [Text generation strategies](../generation_strategies) and [Text
generation](text_generation).
This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"text-generation"`.
The models that this pipeline can use are models that have been trained with an autoregressive language modeling
objective. See the list of available [text completion models](https://huggingface.co/models?filter=text-generation)
and the list of [conversational models](https://huggingface.co/models?other=conversational)
on [huggingface.co/models](https://huggingface.co/models).
"""
# Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
_pipeline_calls_generate = True
_load_processor = False
_load_image_processor = False
_load_feature_extractor = False
_load_tokenizer = True
# Make sure the docstring is updated when the default generation config is changed
_default_generation_config = GenerationConfig(
max_new_tokens=256,
do_sample=True, # free-form text generation often uses sampling
temperature=0.7,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
prefix = None
if self.prefix is not None:
prefix = self.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
prefix = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
self._preprocess_params = {**self._preprocess_params, **preprocess_params}
self._forward_params = {**self._forward_params, **forward_params}
def _sanitize_parameters(
self,
return_full_text=None,
return_tensors=None,
return_text=None,
return_type=None,
clean_up_tokenization_spaces=None,
prefix=None,
handle_long_generation=None,
stop_sequence=None,
truncation=None,
max_length=None,
continue_final_message=None,
skip_special_tokens=None,
**generate_kwargs,
):
# preprocess kwargs
preprocess_params = {}
add_special_tokens = False
if "add_special_tokens" in generate_kwargs:
add_special_tokens = preprocess_params["add_special_tokens"] = generate_kwargs.pop("add_special_tokens")
if "padding" in generate_kwargs:
preprocess_params["padding"] = generate_kwargs.pop("padding")
if truncation is not None:
preprocess_params["truncation"] = truncation
if max_length is not None:
preprocess_params["max_length"] = max_length
generate_kwargs["max_length"] = max_length
if prefix is not None:
preprocess_params["prefix"] = prefix
if prefix:
prefix_inputs = self.tokenizer(
prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework
)
generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
" [None, 'hole']"
)
preprocess_params["handle_long_generation"] = handle_long_generation
if continue_final_message is not None:
preprocess_params["continue_final_message"] = continue_final_message
preprocess_params.update(generate_kwargs)
# forward kwargs
if stop_sequence is not None:
stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
generate_kwargs["eos_token_id"] = stop_sequence_ids
forward_params = generate_kwargs
if self.assistant_model is not None:
forward_params["assistant_model"] = self.assistant_model
if self.assistant_tokenizer is not None:
forward_params["tokenizer"] = self.tokenizer
forward_params["assistant_tokenizer"] = self.assistant_tokenizer
# postprocess kwargs
postprocess_params = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
return_type = ReturnType.TENSORS
if return_type is not None:
postprocess_params["return_type"] = return_type
if clean_up_tokenization_spaces is not None:
postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
if continue_final_message is not None:
postprocess_params["continue_final_message"] = continue_final_message
if skip_special_tokens is not None:
postprocess_params["skip_special_tokens"] = skip_special_tokens
return preprocess_params, forward_params, postprocess_params
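# Sketch of how the return flags above map to ReturnType (values are illustrative):
#   return_full_text=True  -> ReturnType.FULL_TEXT (prompt + completion)
#   return_full_text=False -> ReturnType.NEW_TEXT  (completion only)
#   return_tensors=True    -> ReturnType.TENSORS   (token ids, no decoding)
# Combining `return_full_text` with `return_tensors` (or `return_text`) raises a ValueError.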
# overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
def _parse_and_tokenize(self, *args, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True})
return super()._parse_and_tokenize(*args, **kwargs)
@overload
def __call__(self, text_inputs: str, **kwargs: Any) -> list[dict[str, str]]: ...
@overload
def __call__(self, text_inputs: list[str], **kwargs: Any) -> list[list[dict[str, str]]]: ...
@overload
def __call__(self, text_inputs: ChatType, **kwargs: Any) -> list[dict[str, ChatType]]: ...
@overload
def __call__(self, text_inputs: list[ChatType], **kwargs: Any) -> list[list[dict[str, ChatType]]]: ...
def __call__(self, text_inputs, **kwargs):
"""
Complete the prompt(s) given as inputs.
Args:
text_inputs (`str`, `list[str]`, list[dict[str, str]], or `list[list[dict[str, str]]]`):
One or several prompts (or one list of prompts) to complete. If a string or a list of strings is
passed, this pipeline will continue each prompt. Alternatively, a "chat", in the form of a list
of dicts with "role" and "content" keys, can be passed, or a list of such chats. When chats are passed,
the model's chat template will be used to format them before passing them to the model.
return_tensors (`bool`, *optional*, defaults to `False`):
Returns the tensors of predictions (as token indices) in the outputs. If set to
`True`, the decoded text is not returned.
return_text (`bool`, *optional*):
Returns the decoded texts in the outputs.
return_full_text (`bool`, *optional*, defaults to `True`):
If set to `False` only added text is returned, otherwise the full text is returned. Cannot be
specified at the same time as `return_text`.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up the potential extra spaces in the text output.
continue_final_message (`bool`, *optional*): This indicates that you want the model to continue the
last message in the input chat rather than starting a new one, allowing you to "prefill" its response.
By default this is `True` when the final message in the input chat has the `assistant` role and
`False` otherwise, but you can manually override that behaviour by setting this flag.
prefix (`str`, *optional*):
Prefix added to prompt.
handle_long_generation (`str`, *optional*):
By default, this pipeline does not handle long generation (generation that, in one form or another,
exceeds the model's maximum length). There is no perfect way to address this (more info:
https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
strategies to work around that problem depending on your use case.
- `None` : default strategy where nothing in particular happens
- `"hole"`: Truncates the left of the input, and leaves a gap wide enough to let generation happen (might
truncate a lot of the prompt and is not suitable when the generation itself exceeds the model's capacity)
generate_kwargs (`dict`, *optional*):
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./text_generation)).
Return:
A list or a list of lists of `dict`: Returns one of the following dictionaries (cannot return a combination
of both `generated_text` and `generated_token_ids`):
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
"""
if isinstance(
text_inputs,
(list, tuple, types.GeneratorType, KeyDataset)
if is_torch_available()
else (list, tuple, types.GeneratorType),
):
if isinstance(text_inputs, types.GeneratorType):
text_inputs, _ = itertools.tee(text_inputs)
text_inputs, first_item = (x for x in text_inputs), next(_)
else:
first_item = text_inputs[0]
if isinstance(first_item, (list, tuple, dict)):
# We have one or more prompts in list-of-dicts format, so this is chat mode
if isinstance(first_item, dict):
return super().__call__(Chat(text_inputs), **kwargs)
else:
chats = (Chat(chat) for chat in text_inputs) # 🐈 🐈 🐈
if isinstance(text_inputs, types.GeneratorType):
return super().__call__(chats, **kwargs)
else:
return super().__call__(list(chats), **kwargs)
return super().__call__(text_inputs, **kwargs)
def preprocess(
self,
prompt_text,
prefix="",
handle_long_generation=None,
add_special_tokens=None,
truncation=None,
padding=None,
max_length=None,
continue_final_message=None,
**generate_kwargs,
):
# Only set non-None tokenizer kwargs, so as to rely on the tokenizer's defaults
tokenizer_kwargs = {
"add_special_tokens": add_special_tokens,
"truncation": truncation,
"padding": padding,
"max_length": max_length, # TODO: name clash -- this is broken, `max_length` is also a `generate` arg
}
tokenizer_kwargs = {key: value for key, value in tokenizer_kwargs.items() if value is not None}
if isinstance(prompt_text, Chat):
tokenizer_kwargs.pop("add_special_tokens", None) # ignore add_special_tokens on chats
# If the user passes a chat that ends in an assistant message, we treat it as a prefill by default
# because very few models support multiple separate, consecutive assistant messages
if continue_final_message is None:
continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
inputs = self.tokenizer.apply_chat_template(
prompt_text.messages,
add_generation_prompt=not continue_final_message,
continue_final_message=continue_final_message,
return_dict=True,
return_tensors=self.framework,
**tokenizer_kwargs,
)
else:
inputs = self.tokenizer(prefix + prompt_text, return_tensors=self.framework, **tokenizer_kwargs)
inputs["prompt_text"] = prompt_text
if handle_long_generation == "hole":
cur_len = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
new_tokens = generate_kwargs["max_new_tokens"]
else:
new_tokens = generate_kwargs.get("max_length", self.generation_config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected")
if cur_len + new_tokens > self.tokenizer.model_max_length:
keep_length = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"
" model's max length"
)
inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
return inputs
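# Worked example of the "hole" strategy above (numbers are illustrative): with
# tokenizer.model_max_length = 1024, a 1000-token prompt and max_new_tokens = 100,
# cur_len + new_tokens = 1100 > 1024, so keep_length = 1024 - 100 = 924 and only the
# last 924 prompt tokens (and the matching slice of the attention mask) are kept.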
def _forward(self, model_inputs, **generate_kwargs):
input_ids = model_inputs["input_ids"]
attention_mask = model_inputs.get("attention_mask", None)
# Allow empty prompts
if input_ids.shape[1] == 0:
input_ids = None
attention_mask = None
in_b = 1
else:
in_b = input_ids.shape[0]
prompt_text = model_inputs.pop("prompt_text")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
prefix_length = generate_kwargs.pop("prefix_length", 0)
if prefix_length > 0:
has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.generation_config.max_length
generate_kwargs["max_length"] += prefix_length
has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
output = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
if isinstance(output, ModelOutput):
generated_sequence = output.sequences
other_outputs = {k: v for k, v in output.items() if k not in {"sequences", "past_key_values"}}
out_b = generated_sequence.shape[0]
if self.framework == "pt":
for key, value in other_outputs.items():
if isinstance(value, torch.Tensor) and value.shape[0] == out_b:
other_outputs[key] = value.reshape(in_b, out_b // in_b, *value.shape[1:])
if isinstance(value, tuple) and len(value[0]) == out_b:
value = torch.stack(value).swapaxes(0, 1)
other_outputs[key] = value
elif self.framework == "tf":
for key, value in other_outputs.items():
if isinstance(value, tf.Tensor) and value.shape[0] == out_b:
other_outputs[key] = tf.reshape(value, (in_b, out_b // in_b, *value.shape[1:]))
if isinstance(value, tuple) and len(value[0]) == out_b:
value = tf.stack(value).swapaxes(0, 1)
other_outputs[key] = value
else:
generated_sequence = output
other_outputs = {}
out_b = generated_sequence.shape[0]
if self.framework == "pt":
generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
elif self.framework == "tf":
generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
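# Shape sketch (numbers are illustrative): with in_b = 2 prompts and
# num_return_sequences = 3, generate returns out_b = 6 sequences; the reshape above
# regroups them as (2, 3, sequence_length) so each prompt keeps its own generations.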
model_outputs = {
"generated_sequence": generated_sequence,
"input_ids": input_ids,
"prompt_text": prompt_text,
}
if other_outputs:
model_outputs.update({"additional_outputs": other_outputs})
return model_outputs
def postprocess(
self,
model_outputs,
return_type=ReturnType.FULL_TEXT,
clean_up_tokenization_spaces=True,
continue_final_message=None,
skip_special_tokens=None,
):
generated_sequence = model_outputs["generated_sequence"][0]
input_ids = model_outputs["input_ids"]
prompt_text = model_outputs["prompt_text"]
generated_sequence = generated_sequence.numpy().tolist()
records = []
other_outputs = model_outputs.get("additional_outputs", {})
splitted_keys = {}
if other_outputs:
if self.framework == "pt":
for k, v in other_outputs.items():
if isinstance(v, torch.Tensor) and v.shape[0] == len(generated_sequence):
splitted_keys[k] = v.numpy().tolist()
elif self.framework == "tf":
for k, v in other_outputs.items():
if isinstance(v, tf.Tensor) and v.shape[0] == len(generated_sequence):
splitted_keys[k] = v.numpy().tolist()
skip_special_tokens = skip_special_tokens if skip_special_tokens is not None else True
for idx, sequence in enumerate(generated_sequence):
if return_type == ReturnType.TENSORS:
record = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
text = self.tokenizer.decode(
sequence,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
prompt_length = 0
else:
prompt_length = len(
self.tokenizer.decode(
input_ids[0],
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
all_text = text[prompt_length:]
if return_type == ReturnType.FULL_TEXT:
if isinstance(prompt_text, str):
all_text = prompt_text + all_text
elif isinstance(prompt_text, Chat):
if continue_final_message is None:
# If the user passes a chat ending in an assistant message, we treat it as a prefill by
# default because very few models support multiple separate, consecutive assistant messages
continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
if continue_final_message:
# With assistant prefill, concat onto the end of the last message
all_text = list(prompt_text.messages)[:-1] + [
{
"role": prompt_text.messages[-1]["role"],
"content": prompt_text.messages[-1]["content"] + all_text,
}
]
else:
# When we're not starting from a prefill, the output is a new assistant message
all_text = list(prompt_text.messages) + [{"role": "assistant", "content": all_text}]
record = {"generated_text": all_text}
for key, values in splitted_keys.items():
record[key] = values[idx]
records.append(record)
return records
| transformers/src/transformers/pipelines/text_generation.py/0 | {
"file_path": "transformers/src/transformers/pipelines/text_generation.py",
"repo_id": "transformers",
"token_count": 11533
} | 556 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_auto_round_available, is_torch_available, logging
from ..utils.quantization_config import QuantizationConfigMixin
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class AutoRoundQuantizer(HfQuantizer):
"""
Quantizer of the AutoRound method. (https://huggingface.co/papers/2309.05516)
"""
# AutoRound requires data calibration - we support only inference
requires_calibration = True
required_packages = ["auto_round"]
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
def validate_environment(self, *args, **kwargs):
self.device_map = kwargs.get("device_map")
if not is_auto_round_available():
raise ImportError(
"Loading an AutoRound quantized model requires auto-round library (`pip install 'auto-round>=0.5'`)"
)
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
dtype = torch.bfloat16
logger.info("Loading the model in `torch.bfloat16`. To overwrite it, set `dtype` manually.")
return dtype
def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
if model.__class__.main_input_name != "input_ids":
logger.warning("AutoRound offers only limited support for models that are not strictly text-based.")
from auto_round.inference.convert_model import convert_hf_model, infer_target_device
if self.pre_quantized:
target_device = infer_target_device(self.device_map)
model, used_backends = convert_hf_model(model, target_device)
self.used_backends = used_backends
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
if self.pre_quantized:
from auto_round.inference.convert_model import post_init
post_init(model, self.used_backends)
else:
raise ValueError("AutoRound only supports pre-quantized models.")
@property
def is_trainable(self) -> bool:
return False
def is_serializable(self, safe_serialization=None):
## for gptq/awq models, the quantization config will be changed
return True
| transformers/src/transformers/quantizers/quantizer_auto_round.py/0 | {
"file_path": "transformers/src/transformers/quantizers/quantizer_auto_round.py",
"repo_id": "transformers",
"token_count": 1098
} | 557 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..integrations import replace_with_spqr_linear
from ..utils import is_accelerate_available, is_spqr_available, is_torch_available, logging
from ..utils.quantization_config import QuantizationConfigMixin
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class SpQRHfQuantizer(HfQuantizer):
"""
Quantizer of the SpQR method. Enables the loading of prequantized models.
"""
requires_calibration = True
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not torch.cuda.is_available():
raise RuntimeError("GPU is required to run SpQR quantized model.")
if not is_accelerate_available():
raise ImportError("Using `spqr` quantization requires Accelerate: `pip install accelerate`")
if not is_spqr_available():
raise ImportError("Using `spqr` quantization requires SpQR: `pip install spqr_quant[gpu]`")
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
dtype = torch.float16
logger.info("Assuming SpQR inference on GPU and loading the model in `torch.float16`.")
elif dtype != torch.float16:
raise ValueError(
"You cannot use any type other than torch.float16 for SpQR. Please either leave it None or set it to "
"torch.float16 explicitly."
)
return dtype
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[list[str]] = None,
**kwargs,
):
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
replace_with_spqr_linear(
model,
quantization_config=self.quantization_config,
modules_to_not_convert=self.modules_to_not_convert,
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
@property
def is_trainable(self):
return False
def is_serializable(self, safe_serialization=None):
return True
| transformers/src/transformers/quantizers/quantizer_spqr.py/0 | {
"file_path": "transformers/src/transformers/quantizers/quantizer_spqr.py",
"repo_id": "transformers",
"token_count": 1205
} | 558 |
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Callbacks to use with the Trainer class and customize the training loop.
"""
import dataclasses
import json
import math
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from tqdm.auto import tqdm
from .trainer_utils import HPSearchBackend, IntervalStrategy, SaveStrategy, has_length
from .training_args import TrainingArguments
from .utils import logging
logger = logging.get_logger(__name__)
@dataclass
class TrainerState:
"""
A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing
and passed to the [`TrainerCallback`].
<Tip>
Throughout this class, one step is to be understood as one update step. When using gradient accumulation, one update
step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update
step requires going through *n* batches.
</Tip>
Args:
epoch (`float`, *optional*):
Only set during training, will represent the epoch the training is at (the decimal part being the
percentage of the current epoch completed).
global_step (`int`, *optional*, defaults to 0):
During training, represents the number of update steps completed.
max_steps (`int`, *optional*, defaults to 0):
The number of update steps to do during the current training.
logging_steps (`int`, *optional*, defaults to 500):
Log every X update steps
eval_steps (`int`, *optional*):
Run an evaluation every X steps.
save_steps (`int`, *optional*, defaults to 500):
Save checkpoint every X update steps.
train_batch_size (`int`, *optional*):
The batch size for the training dataloader. Only needed when
`auto_find_batch_size` has been used.
num_input_tokens_seen (`int`, *optional*, defaults to 0):
When tracking the inputs tokens, the number of tokens seen during training (number of input tokens, not the
number of prediction tokens).
total_flos (`float`, *optional*, defaults to 0):
The total number of floating operations done by the model since the beginning of training (stored as floats
to avoid overflow).
log_history (`list[dict[str, float]]`, *optional*):
The list of logs done since the beginning of training.
best_metric (`float`, *optional*):
When tracking the best model, the value of the best metric encountered so far.
best_global_step (`int`, *optional*):
When tracking the best model, the step at which the best metric was encountered.
Used for setting `best_model_checkpoint`.
best_model_checkpoint (`str`, *optional*):
When tracking the best model, the value of the name of the checkpoint for the best model encountered so
far.
is_local_process_zero (`bool`, *optional*, defaults to `True`):
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
several machines) main process.
is_world_process_zero (`bool`, *optional*, defaults to `True`):
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be `True` for one process).
is_hyper_param_search (`bool`, *optional*, defaults to `False`):
Whether we are in the process of a hyperparameter search using Trainer.hyperparameter_search. This will
impact the way data will be logged in TensorBoard.
stateful_callbacks (`list[StatefulTrainerCallback]`, *optional*):
Callbacks attached to the `Trainer` that should have their states be saved or restored.
Relevant callbacks should implement a `state` and `from_state` function.
"""
epoch: Optional[float] = None
global_step: int = 0
max_steps: int = 0
logging_steps: int = 500
eval_steps: int = 500
save_steps: int = 500
train_batch_size: Optional[int] = None
num_train_epochs: int = 0
num_input_tokens_seen: int = 0
total_flos: float = 0
log_history: list[dict[str, float]] = None
best_metric: Optional[float] = None
best_global_step: Optional[int] = None
best_model_checkpoint: Optional[str] = None
is_local_process_zero: bool = True
is_world_process_zero: bool = True
is_hyper_param_search: bool = False
trial_name: Optional[str] = None
trial_params: dict[str, Union[str, float, int, bool]] = None
stateful_callbacks: list["TrainerCallback"] = None
def __post_init__(self):
if self.log_history is None:
self.log_history = []
if self.stateful_callbacks is None:
self.stateful_callbacks = {}
elif isinstance(self.stateful_callbacks, dict):
# We are loading the callbacks in from the state file, no need to process them
pass
else:
# Saveable callbacks get stored as dict of kwargs
stateful_callbacks = {}
for callback in self.stateful_callbacks:
if not isinstance(callback, (ExportableState)):
raise TypeError(
f"All callbacks passed to be saved must inherit `ExportableState`, but received {type(callback)}"
)
name = callback.__class__.__name__
if name in stateful_callbacks:
# We can have multiple versions of the same callback
# if so, we store them as a list of states to restore
if not isinstance(stateful_callbacks[name], list):
stateful_callbacks[name] = [stateful_callbacks[name]]
stateful_callbacks[name].append(callback.state())
else:
stateful_callbacks[name] = callback.state()
self.stateful_callbacks = stateful_callbacks
def save_to_json(self, json_path: str):
"""Save the content of this instance in JSON format inside `json_path`."""
json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"
with open(json_path, "w", encoding="utf-8") as f:
f.write(json_string)
@classmethod
def load_from_json(cls, json_path: str):
"""Create an instance from the content of `json_path`."""
with open(json_path, encoding="utf-8") as f:
text = f.read()
return cls(**json.loads(text))
def compute_steps(self, args, max_steps):
"""
Calculates and stores the absolute values for the logging,
eval, and save steps, converting them from a proportion of
`max_steps` when they were given as a value below 1.
"""
for step_kind in ("logging", "eval", "save"):
num_steps = getattr(args, f"{step_kind}_steps")
if num_steps is not None:
if num_steps < 1:
num_steps = math.ceil(max_steps * num_steps)
setattr(self, f"{step_kind}_steps", num_steps)
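# Worked example (numbers are illustrative): with max_steps = 500 and
# args.logging_steps = 0.1 (a proportion, i.e. < 1), logging_steps becomes
# math.ceil(500 * 0.1) = 50; an integer value such as 500 is stored unchanged.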
def init_training_references(self, trainer, max_steps, num_train_epochs, trial):
"""
Stores the initial training references needed in `self`
"""
if trainer.hp_name is not None and trainer._trial is not None:
# use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial
# parameter to Train when using DDP.
self.trial_name = trainer.hp_name(trainer._trial)
self.trial_params = None
if trial is not None:
from transformers.integrations import hp_params
assignments = trial.assignments if trainer.hp_search_backend == HPSearchBackend.SIGOPT else trial
self.trial_params = hp_params(assignments)
self.max_steps = max_steps
self.num_train_epochs = num_train_epochs
self.is_local_process_zero = trainer.is_local_process_zero()
self.is_world_process_zero = trainer.is_world_process_zero()
class ExportableState:
"""
A class for objects that include the ability to have its state
be saved during `Trainer._save_checkpoint` and loaded back in during
`Trainer._load_from_checkpoint`.
These must implement a `state` function that gets called during the respective
Trainer function call. It should only include parameters and attributes needed to
recreate the state at a particular time, so as to avoid relying on pickle and to keep to standard
file IO writing.
Example:
```python
class EarlyStoppingCallback(TrainerCallback, ExportableState):
def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def state(self) -> dict:
return {
"args": {
"early_stopping_patience": self.early_stopping_patience,
"early_stopping_threshold": self.early_stopping_threshold,
},
"attributes": {
"early_stopping_patience_counter": self.early_stopping_patience_counter,
}
}
```"""
def state(self) -> dict:
raise NotImplementedError("You must implement a `state` function to utilize this class.")
@classmethod
def from_state(cls, state):
instance = cls(**state["args"])
for k, v in state["attributes"].items():
setattr(instance, k, v)
return instance
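# Round-trip sketch using the EarlyStoppingCallback from the docstring above (values are
# illustrative): from_state({"args": {"early_stopping_patience": 3,
# "early_stopping_threshold": 0.0}, "attributes": {"early_stopping_patience_counter": 1}})
# rebuilds the callback with its patience counter restored to 1.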
@dataclass
class TrainerControl(ExportableState):
"""
A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some
switches in the training loop.
Args:
should_training_stop (`bool`, *optional*, defaults to `False`):
Whether or not the training should be interrupted.
If `True`, this variable will not be set back to `False`. The training will just stop.
should_epoch_stop (`bool`, *optional*, defaults to `False`):
Whether or not the current epoch should be interrupted.
If `True`, this variable will be set back to `False` at the beginning of the next epoch.
should_save (`bool`, *optional*, defaults to `False`):
Whether or not the model should be saved at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
should_evaluate (`bool`, *optional*, defaults to `False`):
Whether or not the model should be evaluated at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
should_log (`bool`, *optional*, defaults to `False`):
Whether or not the logs should be reported at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
"""
should_training_stop: bool = False
should_epoch_stop: bool = False
should_save: bool = False
should_evaluate: bool = False
should_log: bool = False
def _new_training(self):
"""Internal method that resets the variable for a new training."""
self.should_training_stop = False
def _new_epoch(self):
"""Internal method that resets the variable for a new epoch."""
self.should_epoch_stop = False
def _new_step(self):
"""Internal method that resets the variable for a new step."""
self.should_save = False
self.should_evaluate = False
self.should_log = False
def state(self) -> dict:
return {
"args": {
"should_training_stop": self.should_training_stop,
"should_epoch_stop": self.should_epoch_stop,
"should_save": self.should_save,
"should_evaluate": self.should_evaluate,
"should_log": self.should_log,
},
"attributes": {},
}
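# Illustrative sketch (not part of the original module): how callbacks typically flip the
# `TrainerControl` switches, and how the step-scoped ones are reset by `_new_step()`.
def _example_trainer_control_flags():
    control = TrainerControl()
    # A callback requesting an evaluation and a checkpoint at the current step.
    control.should_evaluate = True
    control.should_save = True
    # The training loop consumes the flags and resets them before the next step.
    control._new_step()
    assert control.should_evaluate is False and control.should_save is False
    return control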
class TrainerCallback:
# no-format
"""
A class for objects that will inspect the state of the training loop at some events and take some decisions. At
each of those events the following arguments are available:
Args:
args ([`TrainingArguments`]):
The training arguments used to instantiate the [`Trainer`].
state ([`TrainerState`]):
The current state of the [`Trainer`].
control ([`TrainerControl`]):
The object that is returned to the [`Trainer`] and can be used to make some decisions.
model ([`PreTrainedModel`] or `torch.nn.Module`):
The model being trained.
tokenizer ([`PreTrainedTokenizer`]):
The tokenizer used for encoding the data. This is deprecated in favour of `processing_class`.
processing_class ([`PreTrainedTokenizer` or `BaseImageProcessor` or `ProcessorMixin` or `FeatureExtractionMixin`]):
The processing class used for encoding the data. Can be a tokenizer, a processor, an image processor or a feature extractor.
optimizer (`torch.optim.Optimizer`):
The optimizer used for the training steps.
lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`):
The scheduler used for setting the learning rate.
train_dataloader (`torch.utils.data.DataLoader`, *optional*):
The current dataloader used for training.
eval_dataloader (`torch.utils.data.DataLoader`, *optional*):
The current dataloader used for evaluation.
metrics (`dict[str, float]`):
The metrics computed by the last evaluation phase.
Those are only accessible in the event `on_evaluate`.
logs (`dict[str, float]`):
The values to log.
Those are only accessible in the event `on_log`.
The `control` object is the only one that can be changed by the callback, in which case the event that changes it
should return the modified version.
    The arguments `args`, `state` and `control` are positional for all events; all the others are grouped in `kwargs`.
    You can unpack the ones you need in the signature of the event. As an example, see the code of the
simple [`~transformers.PrinterCallback`].
Example:
```python
class PrinterCallback(TrainerCallback):
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
```"""
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of the initialization of the [`Trainer`].
"""
pass
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of training.
"""
pass
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of training.
"""
pass
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of an epoch.
"""
pass
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of an epoch.
"""
pass
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
pass
def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called before the optimizer step but after gradient clipping. Useful for monitoring gradients.
"""
pass
def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after the optimizer step but before gradients are zeroed out. Useful for monitoring gradients.
"""
pass
def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
        Event called at the end of a substep during gradient accumulation.
"""
pass
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
pass
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after an evaluation phase.
"""
pass
def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs):
"""
Event called after a successful prediction.
"""
pass
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a checkpoint save.
"""
pass
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after logging the last logs.
"""
pass
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a prediction step.
"""
pass
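# Illustrative sketch (not part of the original module): a hypothetical callback that
# requests a checkpoint every `save_every` steps by mutating and returning `control`.
class _ExamplePeriodicSaveCallback(TrainerCallback):
    def __init__(self, save_every: int = 500):
        self.save_every = save_every
    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step > 0 and state.global_step % self.save_every == 0:
            control.should_save = True
        return control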
class CallbackHandler(TrainerCallback):
"""Internal class that just calls the list of callbacks in order."""
def __init__(self, callbacks, model, processing_class, optimizer, lr_scheduler):
self.callbacks = []
for cb in callbacks:
self.add_callback(cb)
self.model = model
self.processing_class = processing_class
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.train_dataloader = None
self.eval_dataloader = None
if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
logger.warning(
"The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
+ "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of"
+ "callbacks is\n:"
+ self.callback_list
)
def add_callback(self, callback):
cb = callback() if isinstance(callback, type) else callback
cb_class = callback if isinstance(callback, type) else callback.__class__
if cb_class in [c.__class__ for c in self.callbacks]:
logger.warning(
f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
+ "list of callbacks is\n:"
+ self.callback_list
)
self.callbacks.append(cb)
def pop_callback(self, callback):
if isinstance(callback, type):
for cb in self.callbacks:
if isinstance(cb, callback):
self.callbacks.remove(cb)
return cb
else:
for cb in self.callbacks:
if cb == callback:
self.callbacks.remove(cb)
return cb
def remove_callback(self, callback):
if isinstance(callback, type):
for cb in self.callbacks:
if isinstance(cb, callback):
self.callbacks.remove(cb)
return
else:
self.callbacks.remove(callback)
@property
def callback_list(self):
return "\n".join(cb.__class__.__name__ for cb in self.callbacks)
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_init_end", args, state, control)
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_training_stop = False
return self.call_event("on_train_begin", args, state, control)
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_train_end", args, state, control)
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_epoch_stop = False
return self.call_event("on_epoch_begin", args, state, control)
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_epoch_end", args, state, control)
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_log = False
control.should_evaluate = False
control.should_save = False
return self.call_event("on_step_begin", args, state, control)
def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_pre_optimizer_step", args, state, control)
def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_optimizer_step", args, state, control)
def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_substep_end", args, state, control)
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_step_end", args, state, control)
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
control.should_evaluate = False
return self.call_event("on_evaluate", args, state, control, metrics=metrics)
def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
return self.call_event("on_predict", args, state, control, metrics=metrics)
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_save = False
return self.call_event("on_save", args, state, control)
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs):
control.should_log = False
return self.call_event("on_log", args, state, control, logs=logs)
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_prediction_step", args, state, control)
def call_event(self, event, args, state, control, **kwargs):
for callback in self.callbacks:
result = getattr(callback, event)(
args,
state,
control,
model=self.model,
processing_class=self.processing_class,
optimizer=self.optimizer,
lr_scheduler=self.lr_scheduler,
train_dataloader=self.train_dataloader,
eval_dataloader=self.eval_dataloader,
**kwargs,
)
# A Callback can skip the return of `control` if it doesn't change it.
if result is not None:
control = result
return control
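# Illustrative sketch (not part of the original module): dispatching one event through a
# `CallbackHandler`. `args`, `state` and `control` are expected to be `TrainingArguments`,
# `TrainerState` and `TrainerControl` instances; `model`, `optimizer` and `lr_scheduler`
# are `None` here purely for demonstration, a real `Trainer` supplies them.
# `DefaultFlowCallback` is defined below; the name resolves at call time once the module is imported.
def _example_callback_handler_dispatch(args, state, control):
    class _LogEveryStep(TrainerCallback):
        def on_step_end(self, args, state, control, **kwargs):
            control.should_log = True
            return control
    handler = CallbackHandler(
        callbacks=[DefaultFlowCallback(), _LogEveryStep()],
        model=None,
        processing_class=None,
        optimizer=None,
        lr_scheduler=None,
    )
    # Each callback may return a modified `control`, which is forwarded to the next one.
    return handler.on_step_end(args, state, control)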
class DefaultFlowCallback(TrainerCallback):
"""
A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints.
"""
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if state.global_step == 1 and args.logging_first_step:
control.should_log = True
if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0:
control.should_log = True
# Evaluate
if (
args.eval_strategy == IntervalStrategy.STEPS
and state.global_step % state.eval_steps == 0
and args.eval_delay <= state.global_step
):
control.should_evaluate = True
# Save
if (
args.save_strategy == SaveStrategy.STEPS
and state.save_steps > 0
and state.global_step % state.save_steps == 0
):
control.should_save = True
# End training
if state.global_step >= state.max_steps:
control.should_training_stop = True
# Save the model at the end if we have a save strategy
            if args.save_strategy != SaveStrategy.NO:
control.should_save = True
return control
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if args.logging_strategy == IntervalStrategy.EPOCH:
control.should_log = True
# Evaluate
if args.eval_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch:
control.should_evaluate = True
# Save
if args.save_strategy == SaveStrategy.EPOCH:
control.should_save = True
return control
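# Illustrative sketch (not part of the original module): exercising the step-based cadence
# with `SimpleNamespace` stand-ins that stub only the attributes `on_step_end` reads.
# `IntervalStrategy` and `SaveStrategy` are assumed to be the enums imported at the top of
# this module; real training uses actual `TrainingArguments` / `TrainerState` objects.
def _example_default_flow_cadence():
    from types import SimpleNamespace
    args = SimpleNamespace(
        logging_first_step=False,
        logging_strategy=IntervalStrategy.STEPS,
        eval_strategy=IntervalStrategy.STEPS,
        eval_delay=0,
        save_strategy=SaveStrategy.STEPS,
    )
    state = SimpleNamespace(global_step=100, logging_steps=50, eval_steps=100, save_steps=100, max_steps=1000)
    control = DefaultFlowCallback().on_step_end(args, state, TrainerControl())
    # With this cadence, step 100 triggers logging, evaluation and a checkpoint.
    assert control.should_log and control.should_evaluate and control.should_save
    return control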
class ProgressCallback(TrainerCallback):
"""
A [`TrainerCallback`] that displays the progress of training or evaluation.
    You can modify `max_str_len` to control the maximum length of strings that are logged before truncation.
"""
def __init__(self, max_str_len: int = 100):
"""
Initialize the callback with optional max_str_len parameter to control string truncation length.
Args:
max_str_len (`int`):
Maximum length of strings to display in logs.
Longer strings will be truncated with a message.
"""
self.training_bar = None
self.prediction_bar = None
self.max_str_len = max_str_len
def on_train_begin(self, args, state, control, **kwargs):
if state.is_world_process_zero:
self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True)
self.current_step = 0
def on_step_end(self, args, state, control, **kwargs):
if state.is_world_process_zero:
self.training_bar.update(state.global_step - self.current_step)
self.current_step = state.global_step
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if state.is_world_process_zero and has_length(eval_dataloader):
if self.prediction_bar is None:
self.prediction_bar = tqdm(
total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True
)
self.prediction_bar.update(1)
def on_evaluate(self, args, state, control, **kwargs):
if state.is_world_process_zero:
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_predict(self, args, state, control, **kwargs):
if state.is_world_process_zero:
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
if state.is_world_process_zero and self.training_bar is not None:
# make a shallow copy of logs so we can mutate the fields copied
# but avoid doing any value pickling.
shallow_logs = {}
for k, v in logs.items():
if isinstance(v, str) and len(v) > self.max_str_len:
shallow_logs[k] = (
f"[String too long to display, length: {len(v)} > {self.max_str_len}. "
"Consider increasing `max_str_len` if needed.]"
)
else:
shallow_logs[k] = v
_ = shallow_logs.pop("total_flos", None)
# round numbers so that it looks better in console
if "epoch" in shallow_logs:
shallow_logs["epoch"] = round(shallow_logs["epoch"], 2)
self.training_bar.write(str(shallow_logs))
def on_train_end(self, args, state, control, **kwargs):
if state.is_world_process_zero:
self.training_bar.close()
self.training_bar = None
class PrinterCallback(TrainerCallback):
"""
A bare [`TrainerCallback`] that just prints the logs.
"""
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
class EarlyStoppingCallback(TrainerCallback, ExportableState):
"""
A [`TrainerCallback`] that handles early stopping.
Args:
early_stopping_patience (`int`):
Use with `metric_for_best_model` to stop training when the specified metric worsens for
`early_stopping_patience` evaluation calls.
        early_stopping_threshold (`float`, *optional*):
            Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the
            specified metric must improve to satisfy early stopping conditions.
This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric
in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the
early stopping will not occur until the next save step.
"""
def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def check_metric_value(self, args, state, control, metric_value):
# best_metric is set by code for load_best_model
operator = np.greater if args.greater_is_better else np.less
if state.best_metric is None or (
operator(metric_value, state.best_metric)
and abs(metric_value - state.best_metric) > self.early_stopping_threshold
):
self.early_stopping_patience_counter = 0
else:
self.early_stopping_patience_counter += 1
def on_train_begin(self, args, state, control, **kwargs):
if not args.load_best_model_at_end:
logger.warning(
"Using EarlyStoppingCallback without load_best_model_at_end=True. "
"Once training is finished, the best model will not be loaded automatically."
)
assert args.metric_for_best_model is not None, (
"EarlyStoppingCallback requires metric_for_best_model to be defined"
)
assert args.eval_strategy != IntervalStrategy.NO, (
"EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
)
def on_evaluate(self, args, state, control, metrics, **kwargs):
metric_to_check = args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics.get(metric_to_check)
if metric_value is None:
logger.warning(
f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping"
" is disabled"
)
return
self.check_metric_value(args, state, control, metric_value)
if self.early_stopping_patience_counter >= self.early_stopping_patience:
control.should_training_stop = True
def state(self) -> dict:
return {
"args": {
"early_stopping_patience": self.early_stopping_patience,
"early_stopping_threshold": self.early_stopping_threshold,
},
"attributes": {
"early_stopping_patience_counter": self.early_stopping_patience_counter,
},
}
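# Illustrative sketch (not part of the original module): how the patience counter evolves
# as `check_metric_value` sees successive evaluation results. `SimpleNamespace` objects
# stand in for `TrainingArguments` / `TrainerState`; in a real run the `Trainer` updates
# `state.best_metric` when `load_best_model_at_end=True`.
def _example_early_stopping_patience():
    from types import SimpleNamespace
    callback = EarlyStoppingCallback(early_stopping_patience=2, early_stopping_threshold=0.0)
    args = SimpleNamespace(greater_is_better=True)
    state = SimpleNamespace(best_metric=None)
    callback.check_metric_value(args, state, None, 0.80)  # first metric: counter stays at 0
    state.best_metric = 0.80  # normally set by the Trainer's best-model tracking
    callback.check_metric_value(args, state, None, 0.79)  # worse: counter -> 1
    callback.check_metric_value(args, state, None, 0.78)  # worse again: counter -> 2
    assert callback.early_stopping_patience_counter >= callback.early_stopping_patience
    return callback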
| transformers/src/transformers/trainer_callback.py/0 | {
"file_path": "transformers/src/transformers/trainer_callback.py",
"repo_id": "transformers",
"token_count": 13464
} | 559 |
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import requires_backends
class LayoutLMv2Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["detectron2"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["detectron2"])
| transformers/src/transformers/utils/dummy_detectron2_objects.py/0 | {
"file_path": "transformers/src/transformers/utils/dummy_detectron2_objects.py",
"repo_id": "transformers",
"token_count": 131
} | 560 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import collections
import contextlib
import functools
import inspect
import math
import operator
import os
import random
import sys
import warnings
from typing import Any, Callable, Literal, Optional, Union
import torch
import torch.utils._pytree as pytree
from torch import nn
from torch.fx import Graph, GraphModule, Node, Proxy, Tracer
from torch.fx._compatibility import compatibility
from torch.fx._symbolic_trace import is_fx_tracing
from torch.fx.proxy import ParameterProxy
from .. import logging
from ..cache_utils import Cache, DynamicCache, StaticCache
from ..modeling_utils import PretrainedConfig, PreTrainedModel
from ..models.auto import get_values
from ..models.auto.modeling_auto import (
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_BACKBONE_MAPPING_NAMES,
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
MODEL_FOR_CTC_MAPPING_NAMES,
MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_IMAGE_MAPPING_NAMES,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
MODEL_FOR_MASKED_LM_MAPPING_NAMES,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
MODEL_FOR_PRETRAINING_MAPPING_NAMES,
MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES,
MODEL_MAPPING_NAMES,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
is_peft_available,
)
if is_peft_available():
from peft import PeftModel
logger = logging.get_logger(__name__)
_IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES
def _generate_supported_model_class_names(
model_name: type[PretrainedConfig],
supported_tasks: Optional[Union[str, list[str]]] = None,
) -> list[str]:
task_mapping = {
"default": MODEL_MAPPING_NAMES,
"pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES,
"next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
"masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
"multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"masked-image-modeling": MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
"image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"ctc": MODEL_FOR_CTC_MAPPING_NAMES,
"audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"backbone": MODEL_FOR_BACKBONE_MAPPING_NAMES,
"image-feature-extraction": MODEL_FOR_IMAGE_MAPPING_NAMES,
}
if supported_tasks is None:
supported_tasks = task_mapping.keys()
if isinstance(supported_tasks, str):
supported_tasks = [supported_tasks]
model_class_names = []
for task in supported_tasks:
class_name = task_mapping[task].get(model_name, None)
if class_name:
model_class_names.append(class_name)
return model_class_names
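# Illustrative sketch (not part of the original module): the helper above resolves a model
# type string and a set of tasks to architecture class names through the auto-model name
# mappings; the exact names come from `MODEL_*_MAPPING_NAMES` and may evolve with the library.
def _example_supported_class_names_for_bert() -> list[str]:
    # Expected to contain entries such as "BertForMaskedLM" and "BertForSequenceClassification".
    return _generate_supported_model_class_names(
        "bert", supported_tasks=["masked-lm", "sequence-classification"]
    )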
_REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [
"altclip",
"albert",
"bart",
"bert",
"bitnet",
"blenderbot",
"blenderbot-small",
"bloom",
"clip",
"convnext",
"deberta",
"deberta-v2",
"dinov2",
"dinov3_convnext",
"dinov3_vit",
"distilbert",
"donut-swin",
"electra",
"gpt2",
"gpt_neo",
"gptj",
"hiera",
"hubert",
"ijepa",
"layoutlm",
"llama",
"cohere",
"lxmert",
"m2m_100",
"marian",
"mbart",
"megatron-bert",
"mistral",
"mixtral",
"mobilebert",
"mt5",
"nezha",
"opt",
"pegasus",
"plbart",
"qwen2",
"qwen2_moe",
"qwen3",
"qwen3_moe",
"resnet",
"roberta",
"segformer",
"speech_to_text",
"speech_to_text_2",
"swin",
"t5",
"trocr",
"vit",
"vjepa2",
"xglm",
"wav2vec2",
# "xlnet",
]
_FX_SUPPORTED_MODELS_WITH_KV_CACHE = ["llama", "opt"]
_REGULAR_SUPPORTED_MODELS = []
for item in _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS:
if isinstance(item, dict):
_REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(**item))
else:
_REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(item))
_SPECIAL_SUPPORTED_MODELS = [
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
"AltCLIPTextModel",
"AltCLIPVisionModel",
"GitVisionModel",
"GPT2DoubleHeadsModel",
"Speech2Text2Decoder",
"TrOCRDecoder",
"PeftModelForCausalLM",
"PeftModelForSeq2SeqLM",
"VJEPA2ForVideoClassification",
# TODO: add support for them as it should be quite easy to do so (small blocking issues).
# XLNetForQuestionAnswering,
]
_SUPPORTED_MODELS = tuple(sorted(set(_REGULAR_SUPPORTED_MODELS + _SPECIAL_SUPPORTED_MODELS)))
_CURRENT_TRACER = None
def torch_nn_embedding(self, input):
return torch.empty(*input.shape, self.weight.shape[-1], device="meta", dtype=self.weight.dtype)
def torch_nn_functional_embedding(
input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False
):
return torch.empty(*input.shape, weight.shape[-1], device="meta", dtype=weight.dtype)
def torch_nn_layernorm(self, input):
return input
def torch_nn_groupnorm(self, input):
return input
def torch_nn_linear(self, input):
return torch.empty(input.shape[:-1] + (self.out_features,), device="meta")
def torch_relu(x):
return x
def torch_nn_relu(self, x):
return x
def torch_nn_functional_relu(x, inplace=False):
    if inplace:
raise ValueError("Don't support in-place functional.relu for MetaTensor analysis")
return x
def torch_where(condition, x, y):
# torch.where returns the broadcasted tensor of condition, x, and y,
# so hack it by using addition
return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta")
def torch_abs(input, *, out=None):
if out is not None:
raise ValueError("Don't support in-place abs for MetaTensor analysis")
return input
def torch_arange(*args, **kwargs):
n = len(args)
step = 1
if n == 1:
start = 0
end = args[0]
elif n == 2:
start, end = args
else:
start, end, step = args
if isinstance(start, float):
start = int(start)
if isinstance(end, float):
        end = int(end)
if isinstance(step, float):
step = int(step)
step = kwargs.get("step", step)
dtype = kwargs.get("dtype")
return torch.empty((end - start) // step, dtype=dtype, device="meta")
def torch_full(*args, **kwargs):
args = list(args)
# We set the fill value to 1 as its value is not important as long as it's not a tensor on the `meta` device.
if len(args) > 1:
args[1] = 1
else:
kwargs["fill_value"] = 1
kwargs_without_device = dict(kwargs)
kwargs_without_device.pop("device", None)
return torch.full(*args, **kwargs_without_device, device="meta")
def torch_cat(tensors, dim=None, axis=None, *, out=None):
if dim is None and axis is None:
dim = 0
if dim is None and axis is not None:
dim = axis
if dim < 0:
dim = tensors[0].dim() + dim
shapes = [t.shape for t in tensors]
shape = list(shapes[0])
concatenated_dim = sum(shape[dim] for shape in shapes)
final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :]
return torch.empty(final_shape, device="meta")
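# Illustrative sketch (not part of the original module): the meta override above only infers
# the output shape; it should agree with eager `torch.cat` on real tensors.
def _example_torch_cat_shape_inference():
    a = torch.empty(2, 3, device="meta")
    b = torch.empty(2, 5, device="meta")
    inferred = torch_cat([a, b], dim=1).shape
    eager = torch.cat([torch.zeros(2, 3), torch.zeros(2, 5)], dim=1).shape
    assert tuple(inferred) == tuple(eager) == (2, 8)
    return inferred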
def torch_stack(tensors, dim=None, axis=None, *, out=None):
if dim is None and axis is None:
dim = 0
if dim is None and axis is not None:
dim = axis
if dim < 0:
dim = tensors[0].dim() + 1 + dim
shape = list(tensors[0].shape)
shape.insert(dim, len(tensors))
return torch.empty(shape, device="meta")
def torch_add(input, other, *, alpha=1, out=None):
if not isinstance(input, torch.Tensor):
return torch.empty_like(other, device="meta")
if not isinstance(other, torch.Tensor):
return torch.empty_like(input, device="meta")
max_length = max(input.dim(), other.dim())
input_shape = list(input.shape) + [1] * (max_length - input.dim())
other_shape = list(other.shape) + [1] * (max_length - other.dim())
shape = []
for i in range(max_length):
shape.append(max(input_shape[i], other_shape[i]))
return torch.empty(shape, device="meta")
def torch_mul(input, other, *, out=None):
return torch_add(input, other, out=out)
def torch_tensor_mul(self, other):
return torch_mul(self, other)
def torch_matmul(input, other, *, out=None):
d1 = input.dim()
d2 = other.dim()
shape = None
if d1 == 1 and d2 == 1:
shape = None
elif d1 == 2 and d2 == 2:
shape = (input.size(0), other.size(1))
elif d1 == 1 and d2 == 2:
shape = (other.size(1),)
    elif d1 == 2 and d2 == 1:
shape = (input.size(0),)
else:
max_length = max(input.dim(), other.dim())
shape1 = list(input.shape)
shape2 = list(other.shape)
if d1 == 1:
shape1 = [1] + shape1
if d2 == 1:
shape2.append(1)
shape1 = [-1] * (max_length - d1) + list(input.shape)
shape2 = [-1] * (max_length - d2) + list(other.shape)
shape = []
for i in range(max_length):
shape.append(max(shape1[i], shape2[i]))
shape[-2] = shape1[-2]
shape[-1] = shape2[-1]
if d1 == 1:
shape.pop(-2)
if d2 == 1:
shape.pop(-1)
if shape is None:
return torch.tensor(0.0, device="meta")
return torch.empty(*shape, device="meta")
def torch_bmm(input, mat2, *, out=None):
if out is not None:
raise ValueError("Don't support in-place bmm for MetaTensor analysis")
batch_size, n, m = input.shape
_, _, p = mat2.shape
return torch.empty(batch_size, n, p, device="meta")
def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
if out is not None:
raise ValueError("Don't support in-place baddbmm for MetaTensor analysis")
return torch_bmm(batch1, batch2)
def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None):
return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out)
def torch_einsum(equation, *operands):
# TODO: infer shape without performing the computation, this might be quite hard.
concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands)
return torch.einsum(equation, *concrete_operands).to("meta")
def torch_tensor_repeat(self, *sizes):
shape = list(self.shape)
for i, x in enumerate(sizes):
shape[i] *= x
return torch.empty(shape, device="meta")
def torch_repeat_interleave(*args, dim=None, output_size=None):
num_args = len(args)
if num_args == 1:
shape = [output_size if output_size is not None else args[0].sum()]
else:
shape = list(args[0].shape)
if dim is None:
if num_args > 2:
dim = args[2]
else:
shape = [sum(shape)]
dim = 0
repeats = args[1]
if isinstance(repeats, int) or torch.numel(repeats) == 1:
shape[dim] *= int(repeats)
else:
shape[dim] = output_size if output_size is not None else repeats.sum()
return torch.empty(*shape, device="meta")
def torch_index_select(input, dim, index, *, out=None):
shape = list(input.shape)
shape[dim] = len(index)
return torch.empty(*shape, device="meta")
def torch_tensor_index_select(self, dim, index):
return torch_index_select(self, dim, index)
def torch_gather(input, dim, index, *, sparse_grad=False, out=None):
shape = list(input.shape)
shape[dim] = index.shape[dim]
return torch.empty(*shape, device="meta")
def torch_tensor_gather(self, dim, index):
return torch_gather(self, dim, index)
def torch_roll(input, shifts, dims=None):
return input
def torch_flip(input, dims):
return input
def torch_tensor_flip(self, dims):
return self
def torch_nn_conv1d(self, input):
l_in = input.shape[-1]
shape = None
padding = self.padding
if padding == "valid":
padding = (0, 0)
if padding == "same":
shape = list(input.shape)
if shape is None:
shape = list(input.shape)
l_out = math.floor(
(l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
)
shape[-1] = l_out
shape[-2] = self.out_channels
return torch.empty(shape, device="meta")
def torch_nn_conv2d(self, input):
h_in, w_in = input.shape[-2:]
shape = None
padding = self.padding
if padding == "valid":
padding = (0, 0)
if padding == "same":
shape = list(input.shape)
if shape is None:
shape = list(input.shape)
h_out = math.floor(
(h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
)
w_out = math.floor(
(w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1
)
shape[-2:] = [h_out, w_out]
shape[-3] = self.out_channels
return torch.empty(shape, device="meta")
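# Illustrative sketch (not part of the original module): the conv2d override above applies
# the standard output-size formula, so it should match a real `nn.Conv2d` forward on CPU.
def _example_conv2d_shape_inference():
    conv = torch.nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, stride=2, padding=1)
    inferred = torch_nn_conv2d(conv, torch.empty(1, 3, 32, 32, device="meta")).shape
    eager = conv(torch.zeros(1, 3, 32, 32)).shape
    assert tuple(inferred) == tuple(eager) == (1, 8, 16, 16)
    return inferred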
def torch_squeeze(input, dim=None):
shape = list(input.shape)
if dim is not None:
if dim < 0:
dim = input.dim() + dim
if shape[dim] == 1:
shape.pop(dim)
else:
new_shape = []
for dim_value in shape:
if dim_value == 1:
continue
new_shape.append(dim_value)
shape = new_shape
return torch.empty(shape, device="meta")
def torch_tensor_squeeze(self, dim=None):
return torch_squeeze(self, dim)
def torch_unsqueeze(input, dim):
shape = list(input.shape)
if dim < 0:
dim = input.dim() + 1 + dim
shape.insert(dim, 1)
return torch.empty(shape, device="meta")
def torch_tensor_unsqueeze(self, dim):
return torch_unsqueeze(self, dim)
def torch_unique_consecutive(input, **kwargs):
output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs)
if isinstance(output, torch.Tensor):
return output.to("meta")
else:
        return tuple(map(lambda x: x.to("meta"), output))
def torch_nn_functional_one_hot(tensor, num_classes=-1):
if num_classes < 0:
raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis")
shape = list(tensor.shape) + [num_classes]
return torch.empty(shape, device="meta")
def torch_nn_functional_scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None
):
target_length = query.shape[-2]
head_dim = value.shape[-1]
return torch.empty((*query.shape[:-2], target_length, head_dim), device="meta")
def torch_nn_mseloss(self, input, target):
if self.reduction == "none":
shape = target.shape
else:
shape = (1,)
return torch.empty(shape, device="meta")
def torch_nn_crossentropyloss(self, input, target):
if self.reduction == "none":
shape = target.shape
else:
shape = (1,)
return torch.empty(shape, device="meta")
def torch_nn_bcewithlogitsloss(self, input, target):
if self.reduction == "none":
shape = target.shape
else:
shape = (1,)
return torch.empty(shape, device="meta")
def operator_getitem(a, b):
def to_concrete(t):
if isinstance(t, torch.Tensor):
concrete = torch.ones_like(t, device="cpu")
if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]:
concrete = concrete.to(torch.int64)
return concrete
return t
if isinstance(a, torch.Tensor):
# TODO: infer shape without performing the computation.
if isinstance(b, tuple):
b = tuple(map(to_concrete, b))
else:
b = to_concrete(b)
return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta")
return operator.getitem(a, b)
_MANUAL_META_OVERRIDES: dict[Callable, Callable] = {
torch.nn.Embedding: torch_nn_embedding,
torch.nn.functional.embedding: torch_nn_functional_embedding,
torch.nn.LayerNorm: torch_nn_layernorm,
torch.nn.GroupNorm: torch_nn_groupnorm,
torch.nn.Linear: torch_nn_linear,
torch.relu: torch_relu,
torch.nn.functional.relu: torch_nn_functional_relu,
torch.nn.ReLU: torch_nn_relu,
torch.where: torch_where,
torch.abs: torch_abs,
torch.arange: torch_arange,
torch.full: torch_full,
torch.cat: torch_cat,
torch.stack: torch_stack,
torch.add: torch_add,
torch.mul: torch_mul,
torch.Tensor.mul: torch_tensor_mul,
torch.matmul: torch_matmul,
torch.bmm: torch_bmm,
torch.baddbmm: torch_baddbmm,
torch.Tensor.baddbmm: torch_tensor_baddbmm,
torch.einsum: torch_einsum,
torch.Tensor.repeat: torch_tensor_repeat,
torch.repeat_interleave: torch_repeat_interleave,
torch.roll: torch_roll,
torch.flip: torch_flip,
torch.Tensor.flip: torch_tensor_flip,
torch.index_select: torch_index_select,
torch.Tensor.index_select: torch_tensor_index_select,
torch.gather: torch_gather,
torch.Tensor.gather: torch_tensor_gather,
torch.nn.Conv1d: torch_nn_conv1d,
torch.nn.Conv2d: torch_nn_conv2d,
torch.squeeze: torch_squeeze,
torch.Tensor.squeeze: torch_tensor_squeeze,
torch.unsqueeze: torch_unsqueeze,
torch.Tensor.unsqueeze: torch_tensor_unsqueeze,
torch.unique_consecutive: torch_unique_consecutive,
torch.nn.functional.one_hot: torch_nn_functional_one_hot,
torch.nn.MSELoss: torch_nn_mseloss,
torch.nn.CrossEntropyLoss: torch_nn_crossentropyloss,
torch.nn.BCEWithLogitsLoss: torch_nn_bcewithlogitsloss,
operator.getitem: operator_getitem,
}
_MANUAL_META_OVERRIDES[torch.nn.functional.scaled_dot_product_attention] = (
torch_nn_functional_scaled_dot_product_attention
)
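# Illustrative sketch (not part of the original module): during symbolic tracing, calls are
# routed through `_MANUAL_META_OVERRIDES` so output shapes can be inferred on "meta" tensors
# without running the real kernels.
def _example_meta_override_dispatch():
    a = torch.empty(4, 8, device="meta")
    b = torch.empty(8, 16, device="meta")
    meta_fn = _MANUAL_META_OVERRIDES.get(torch.matmul, torch.matmul)
    out = meta_fn(a, b)
    assert out.device.type == "meta" and tuple(out.shape) == (4, 16)
    return out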
class HFProxy(Proxy):
"""
Proxy that uses metadata to handle data-dependent control-flow.
"""
def install_metadata(self, metadata):
self._metadata = metadata
@property
def shape(self):
return self.tracer.create_proxy("call_method", "size", (self,), {})
@property
def device(self):
# Hack so we can track when devices are used. During meta-tensor propagation,
# replace these values with a constant 'meta'
return MetaDeviceAttribute(self, "device")
def __len__(self):
if hasattr(self, "_metadata") and self._metadata is not None:
return len(self._metadata)
return super().__len__()
def __bool__(self):
if hasattr(self, "_metadata") and self._metadata is not None:
return self._metadata
return super().__bool__()
def __getattr__(self, k):
if k == "_metadata":
return self.__getattribute__(k)
# note: not added to the graph yet, if this is a method call
# we peephole optimize to the method invocation
return HFAttribute(self, k)
def __setitem__(self, indices, values):
return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {})
def __contains__(self, key):
if hasattr(self, "_metadata") and self._metadata is not None:
return key in self._metadata
return super().__contains__(key)
class HFAttribute(HFProxy):
def __init__(self, root, attr: str):
self.root = root
self.attr = attr
self.tracer = root.tracer
self._node = None
if hasattr(self.root, "_metadata"):
self.install_metadata(getattr(self.root._metadata, attr))
@property
def node(self):
# the node for attributes is added lazily, since most will just be method calls
# which do not rely on the getitem call
if self._node is None:
self._node = self.tracer.create_proxy("call_function", builtins.getattr, (self.root, self.attr), {}).node
return self._node
def __call__(self, *args, **kwargs):
return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs)
class MetaDeviceAttribute(HFAttribute):
pass
class HFCacheProxy(HFProxy):
"""
Proxy that represents an instance of `transformers.cache_utils.Cache`.
"""
def install_orig_cache_cls(self, orig_cache_cls: type[Cache]):
self._orig_cache_cls = orig_cache_cls
@property
def __class__(self):
if not hasattr(self, "_orig_cache_cls"):
raise RuntimeError("The original Cache class must be installed to the HFCacheProxy.")
return self.tracer._CLASSES_TO_PATCH[self._orig_cache_cls]
def create_wrapper(
function: Callable,
op_type: Union[Literal["call_function"], Literal["call_method"], Literal["get_attr"]],
proxy_factory_fn: Optional[Callable[[Node], Proxy]] = None,
) -> Callable:
@functools.wraps(function)
def wrapper(*args, **kwargs):
if not is_fx_tracing():
return function(*args, **kwargs)
found_proxies = []
def check_proxy(a):
if isinstance(a, Proxy):
found_proxies.append(a)
torch.fx.node.map_aggregate(args, check_proxy)
torch.fx.node.map_aggregate(kwargs, check_proxy)
if len(found_proxies) > 0:
tracer = found_proxies[0].tracer
if op_type == "call_function":
target = function
elif op_type == "call_method" or op_type == "get_attr":
target = function.__name__
else:
raise ValueError(f"op_type {op_type} not supported.")
return tracer.create_proxy(op_type, target, args, kwargs, proxy_factory_fn=proxy_factory_fn)
else:
return function(*args, **kwargs)
return wrapper
class HFProxyableClassMeta(type):
"""
Metaclass that creates a class with its main methods wrapped to be proxyable.
"""
def __new__(
cls,
name: str,
bases: tuple[type, ...],
attrs: dict[str, Any],
proxy_factory_fn: Optional[Callable[[Node], Proxy]] = None,
):
cls = super().__new__(cls, name, bases, attrs)
for attr_name in dir(cls):
attr = getattr(cls, attr_name, None)
if attr is None:
continue
if attr_name == "__init__":
op_type = "call_function"
elif attr_name.startswith("__"):
op_type = None
elif inspect.ismethod(attr):
op_type = "call_function"
elif inspect.isfunction(attr):
op_type = "call_method"
else:
op_type = None
if op_type is not None:
setattr(cls, attr_name, create_wrapper(attr, op_type, proxy_factory_fn=proxy_factory_fn))
return cls
def gen_constructor_wrapper(target: Callable) -> tuple[Callable, Callable]:
"""
Wraps `target` to be proxyable. Used for tensor creators like `torch.ones`, `torch.arange` and so on.
"""
wrapper = create_wrapper(target, "call_function")
return wrapper, target
def _proxies_to_metas(v):
"""Returns the underlying metadata for HFProxies, and behaves like the identity for the others."""
if isinstance(v, MetaDeviceAttribute):
return "meta"
if isinstance(v, torch.fx.Proxy):
if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")):
raise RuntimeError(f"No metadata was found for {v}")
return v._metadata
return v
def create_cache_proxy_factory_fn(orig_cache_cls: type[Cache]) -> Callable[[Node], HFCacheProxy]:
def cache_proxy_factory_fn(n: Node) -> HFCacheProxy:
if not isinstance(_CURRENT_TRACER, HFTracer):
raise RuntimeError("Cannot create HFCacheProxy because there is no HFTracer currently tracing.")
cache_proxy = HFCacheProxy(n, _CURRENT_TRACER)
cache_proxy.install_orig_cache_cls(orig_cache_cls)
return cache_proxy
return cache_proxy_factory_fn
# Proxyable equivalent of the cache classes defined in `transformers.cache_utils`.
ProxyableCache = HFProxyableClassMeta(
"ProxyableCache", (Cache,), {}, proxy_factory_fn=create_cache_proxy_factory_fn(Cache)
)
ProxyableDynamicCache = HFProxyableClassMeta(
"ProxyableDynamicCache",
(DynamicCache,),
{},
proxy_factory_fn=create_cache_proxy_factory_fn(DynamicCache),
)
ProxyableStaticCache = HFProxyableClassMeta(
"ProxyableStaticCache",
(StaticCache,),
{},
proxy_factory_fn=create_cache_proxy_factory_fn(StaticCache),
)
def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[list[int]] = None):
if forbidden_values is None:
forbidden_values = []
value = random.randint(low, high)
while value in forbidden_values:
value = random.randint(low, high)
return value
class HFTracer(Tracer):
"""
Tracer that is able to symbolically trace models from the library. To do that, it uses the HFProxy instead of the
regular PyTorch torch.fx.Proxy.
"""
# Feature flag for proxying accesses to buffer values
proxy_buffer_attributes: bool = True
allow_insert_stateless_mods: bool = True
_TORCH_METHODS_TO_PATCH = [
"arange",
"zeros",
"ones",
"full",
"full_like",
"eye",
"empty",
"tensor",
"clamp",
"finfo",
"tril",
]
_CLASSES_TO_PATCH = {
Cache: ProxyableCache,
DynamicCache: ProxyableDynamicCache,
StaticCache: ProxyableStaticCache,
}
supported_archs = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel)
def __init__(self, autowrap_modules=(math,), autowrap_functions=()):
super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions)
def _generate_dummy_input(
self, model: "PreTrainedModel", input_name: str, shape: list[int], input_names: list[str]
) -> dict[str, torch.Tensor]:
"""Generates dummy input for model inference recording."""
# Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored
# from pickle, or from the "__class__" attribute in the general case.
model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__
device = model.device
inputs_dict = {}
        # when tracing a model with KV cache, we simply need to ensure that the KV cache length is larger than one to
        # rightfully pass certain control flows (Example: https://github.com/huggingface/transformers/blob/5c8d941d66734811d2ef6f57f15b44f7fb7a98c4/src/transformers/modeling_attn_mask_utils.py#L162).
# After tracing, the model can then still be used with arbitrary lengths different than the one used during tracing.
kv_cache_length = 5
if input_name in ["labels", "start_positions", "end_positions"]:
batch_size = shape[0]
if model_class_name in [
*get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES),
*get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES),
*get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES),
*get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
*get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES),
]:
inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device)
elif model_class_name in [
*get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES),
*get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES),
"XLNetForQuestionAnswering",
]:
inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES):
if not hasattr(model.config, "problem_type") or model.config.problem_type is None:
raise ValueError(
"Could not retrieve the problem type for the sequence classification task, please set "
'model.config.problem_type to one of the following values: "regression", '
'"single_label_classification", or "multi_label_classification".'
)
if model.config.problem_type == "regression":
labels_shape = (batch_size, model.config.num_labels)
labels_dtype = torch.float32
elif model.config.problem_type == "single_label_classification":
labels_shape = (batch_size,)
labels_dtype = torch.long
elif model.config.problem_type == "multi_label_classification":
labels_shape = (batch_size, model.config.num_labels)
labels_dtype = torch.float32
else:
raise ValueError(
'Expected model.config.problem_type to be either: "regression", "single_label_classification"'
f', or "multi_label_classification", but "{model.config.problem_type}" was provided.'
)
inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device)
elif model_class_name in [
*get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES),
*get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES),
*get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES),
"GPT2DoubleHeadsModel",
"PeftModelForCausalLM",
"PeftModelForSeq2SeqLM",
]:
inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device)
elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]:
inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device)
else:
raise NotImplementedError(
f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet."
)
elif "pixel_values" in input_name:
batch_size = shape[0]
image_size = getattr(model.config, "image_size", None)
if image_size is None:
if hasattr(model.config, "vision_config"):
image_size = model.config.vision_config.image_size
elif hasattr(model.config, "encoder"):
image_size = model.config.encoder.image_size
else:
image_size = (_generate_random_int(), _generate_random_int())
# If no num_channels is in the config, use some arbitrary value.
num_channels = getattr(model.config, "num_channels", 3)
if not isinstance(image_size, collections.abc.Iterable):
image_size = (image_size, image_size)
height, width = image_size
inputs_dict[input_name] = torch.zeros(
batch_size, num_channels, height, width, dtype=torch.float32, device=device
)
elif "bbox" in input_name:
inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device)
elif "input_features" in input_name:
inputs_dict[input_name] = torch.zeros(
*shape, model.config.input_feat_per_channel, dtype=torch.float, device=device
)
elif "inputs_embeds" in input_name:
batch_size = shape[0]
if (
getattr(model.config, "embedding_size", None) is not None
and model.config.model_type != "megatron-bert"
):
embedding_size = model.config.embedding_size
else:
embedding_size = model.config.hidden_size
if len(shape) == 3:
# (batch_size, num_choices, sequence_length, embedding_size)
embedding_shape = (batch_size, shape[1], shape[2], embedding_size)
else:
# (batch_size, sequence_length, embedding_size)
embedding_shape = (batch_size, shape[1], embedding_size)
inputs_dict[input_name] = torch.zeros(embedding_shape, dtype=torch.float, device=device)
elif "visual_feats" in input_name:
inputs_dict[input_name] = torch.zeros(
shape
+ [
model.config.visual_feat_dim,
],
dtype=torch.float,
device=device,
)
elif "visual_pos" in input_name:
inputs_dict[input_name] = torch.zeros(
shape
+ [
model.config.visual_pos_dim,
],
dtype=torch.float,
device=device,
)
elif "inputs" in input_name:
inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device)
elif "input_values" in input_name:
batch_size, _ = shape
# Generating big sequence length for audio inputs.
seq_length = _generate_random_int(low=10000, high=20000)
inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device)
elif "mask" in input_name:
if "past_key_values" in input_names:
mask_shape = [shape[0], shape[1] + kv_cache_length]
else:
mask_shape = shape
inputs_dict[input_name] = torch.zeros(mask_shape, dtype=torch.long, device=device)
elif "ids" in input_name:
inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device)
elif "past_key_values" in input_name:
if model.config.model_type not in _FX_SUPPORTED_MODELS_WITH_KV_CACHE:
raise NotImplementedError(
f"Symbolic trace with past_key_values input is not supported yet for the model {model.config.model_type}. Please open an issue or a PR in Transformers repository if you would like to see the support added."
)
num_heads = model.config.num_attention_heads
head_dim = model.config.hidden_size // model.config.num_attention_heads
cache_shape = (shape[0], num_heads, kv_cache_length, head_dim)
pkv = tuple(
(
torch.rand(cache_shape, dtype=torch.float, device=device),
torch.rand(cache_shape, dtype=torch.float, device=device),
)
for i in range(model.config.num_hidden_layers)
)
inputs_dict[input_name] = pkv
else:
shape_with_hidden_size = shape + [model.config.hidden_size]
inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device)
return inputs_dict
def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None):
rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn)
if kind == "placeholder" and target in self.meta_args:
rv.install_metadata(self.meta_args[target])
return rv
if target in self.orig_fns:
            # NOTE: tensor constructors in PyTorch define the `device` argument as
            # *keyword-only*. That is why this works. If you add methods to
# _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only,
# this will break and you will likely see issues where we cannot infer
# the size of the output.
if "device" in kwargs:
kwargs["device"] = "meta"
try:
args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas)
kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas)
should_install_metadata = True
self._disable_module_getattr = True
self._disable_call_module = True
if kind == "call_function":
meta_target = _MANUAL_META_OVERRIDES.get(target, target)
meta_out = meta_target(*args_metas, **kwargs_metas)
if isinstance(meta_out, torch.Tensor):
meta_out = meta_out.to(device="meta")
elif kind == "call_method":
method = getattr(args_metas[0].__class__, target)
meta_target = _MANUAL_META_OVERRIDES.get(method, method)
meta_out = meta_target(*args_metas, **kwargs_metas)
elif kind == "call_module":
if not hasattr(self, "orig_forward"):
raise AttributeError(f"{self} does not have an attribute called orig_forward")
mod = self.root.get_submodule(target)
mod_type = type(mod)
if mod_type in _MANUAL_META_OVERRIDES:
meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas)
else:
meta_out = self.orig_forward(*args_metas, **kwargs_metas)
elif kind == "get_attr":
attr_itr = self.root
atoms = target.split(".")
for atom in atoms:
attr_itr = getattr(attr_itr, atom)
if isinstance(attr_itr, torch.Tensor):
meta_out = attr_itr.to(device="meta")
else:
meta_out = attr_itr
else:
should_install_metadata = False
if should_install_metadata:
if not isinstance(rv, Proxy):
raise ValueError("Don't support composite output yet")
rv.install_metadata(meta_out)
except Exception as e:
if _IS_IN_DEBUG_MODE:
warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}")
self._disable_module_getattr = False
self._disable_call_module = False
return rv
# Replaced by .getattr from PyTorch 1.13
def _module_getattr(self, attr, attr_val, parameter_proxy_cache):
if getattr(self, "_disable_module_getattr", False):
return attr_val
else:
def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache):
for n, p in collection_to_search:
if attr_val is p:
if n not in parameter_proxy_cache:
kwargs = {}
if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters:
kwargs["proxy_factory_fn"] = (
None
if not self.param_shapes_constant
else lambda node: ParameterProxy(self, node, n, attr_val)
)
val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type]
parameter_proxy_cache[n] = val_proxy
return parameter_proxy_cache[n]
return None
if isinstance(attr_val, torch.nn.Parameter):
maybe_parameter_proxy = maybe_get_proxy_for_attr(
attr_val, self.root.named_parameters(), parameter_proxy_cache
)
if maybe_parameter_proxy is not None:
return maybe_parameter_proxy
if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
maybe_buffer_proxy = maybe_get_proxy_for_attr(
attr_val, self.root.named_buffers(), parameter_proxy_cache
)
if maybe_buffer_proxy is not None:
return maybe_buffer_proxy
return attr_val
# Needed for PyTorch 1.13+
def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: dict[str, Any]):
return self._module_getattr(attr, attr_val, parameter_proxy_cache)
def call_module(self, m, forward, args, kwargs):
if getattr(self, "_disable_call_module", False):
return forward(*args, **kwargs)
self.orig_forward = forward
return super().call_module(m, forward, args, kwargs)
def proxy(self, node):
return HFProxy(node, self)
@contextlib.contextmanager
def patch_for_tracing(self, root: Union[torch.nn.Module, Callable[..., Any]]):
# Patching torch functions
self.patched_torch_methods = {
target: gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
}
self.orig_fns = set()
for name, (wrapper, orig) in self.patched_torch_methods.items():
setattr(torch, name, wrapper)
self.orig_fns.add(orig)
# Patching classes
patched = []
module_of_model = inspect.getmodule(root)
for name, mod in sys.modules.items():
if module_of_model is not None and mod is not module_of_model:
continue
if not name.startswith("transformers"):
continue
for orig_cls, patched_cls in self._CLASSES_TO_PATCH.items():
for attr_name, attr in mod.__dict__.items():
if attr is orig_cls:
patched.append((mod, attr_name, orig_cls))
setattr(mod, attr_name, patched_cls)
yield
# Restoring patched functions and classes.
for name, (_, orig) in self.patched_torch_methods.items():
setattr(torch, name, orig)
self.patched_torch_methods = {}
self.orig_fns = set()
for mod, attr_name, orig_cls in patched:
setattr(mod, attr_name, orig_cls)
def trace(
self,
root: Union[torch.nn.Module, Callable[..., Any]],
concrete_args: Optional[dict[str, Any]] = None,
dummy_inputs: Optional[dict[str, Any]] = None,
complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True,
) -> Graph:
"""
Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a
`torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from
the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a
`torch.nn.Module` instance to use as the root and add embedded constants to.
Args:
root (`torch.nn.Module` or `Callable`):
                Either a `torch.nn.Module` or a function to be traced through. If root is not a
                [`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
            concrete_args (`dict[str, Any]`, *optional*):
                Concrete arguments that should not be treated as Proxies.
dummy_inputs (`dict[str, Any]`, *optional*):
The dummy inputs needed to handle data-dependent control-flow if `root` is not a
[`~transformers.PreTrainedModel`]. It can also be used when `root` is a
[`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs.
complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
`dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing.
Returns:
`torch.fx.Graph`:
A FX `torch.fx.Graph` representing the semantics of the passed-in `root`.
"""
sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root)
if concrete_args is None:
concrete_args = {}
if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs:
for param in sig.parameters.values():
if param.name in dummy_inputs:
continue
if param.default is inspect.Parameter.empty:
raise ValueError(f"You need to specify a default value for the parameter {param.name}.")
concrete_args.update(
{
p.name: p.default
for p in sig.parameters.values()
if (p.name not in dummy_inputs and p.name not in concrete_args)
}
)
input_names = sig.parameters.keys() - concrete_args.keys()
# Creating a random input shape to generate dummy inputs.
batch_size = _generate_random_int()
sequence_length = _generate_random_int()
shape = [batch_size, sequence_length]
if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
num_choices = _generate_random_int(low=2, high=5)
shape.insert(1, num_choices)
inputs = dict(dummy_inputs) if dummy_inputs is not None else {}
for input_name in input_names:
if input_name in inputs:
continue
# We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to
# be able to use HFTracer._generate_dummy_input.
if isinstance(root, self.supported_archs) or type(root).__qualname__.startswith(
("_deserialize_graph_module", "_CodeOnlyModule")
):
inputs.update(self._generate_dummy_input(root, input_name, shape, input_names=input_names))
else:
raise RuntimeError(
f"Could not generate input named {input_name} for because root is not a"
" transformers.PreTrainedModel."
)
def to_meta(value):
if isinstance(value, torch.Tensor):
return value.to("meta")
return value
concrete_metas = pytree.tree_map(to_meta, inputs)
for param in sig.parameters.values():
if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names:
concrete_metas[f"**{param.name}"] = {}
self.meta_args = concrete_metas
global _CURRENT_TRACER
_CURRENT_TRACER = self
with self.patch_for_tracing(root):
try:
self.graph = super().trace(root, concrete_args=concrete_args)
finally:
_CURRENT_TRACER = None
# This is necessary because concrete args are added as input to the traced module since
# https://github.com/pytorch/pytorch/pull/55888.
for node in self.graph.nodes:
if node.op == "placeholder":
# Removing default values for inputs as the forward pass will fail with them.
if node.target in input_names:
node.args = ()
# Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor].
# It cannot infer on the attributes and methods the input should have, and fails.
node.type = torch.Tensor
# It is a concrete arg so it is not used and should be removed.
else:
to_visit = [node]
to_delete = collections.OrderedDict()
while to_visit:
n = to_visit.pop(0)
to_delete[n] = None
to_visit += list(n.users.keys())
for user in reversed(to_delete.keys()):
self.graph.erase_node(user)
# TODO: solves GraphModule creation.
# Without this, return type annotation "Tuple" is causing code execution failure.
if node.op == "output":
node.type = None
return self.graph
def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool:
"""
Whether the module was instantiated with Proxies. If that is the case, such a module cannot be a leaf module
because its attributes are input-dependent.
"""
return any(isinstance(attr, Proxy) for attr in mod.__dict__.values())
def _insert_module_as_submodule(self, mod: nn.Module) -> str:
"""
Helper method which tries to insert a module that was not declared as submodule.
"""
# If one of the module attributes is a Proxy, it means that its instantiation is input-dependent.
# It is not possible to insert such modules, those should be traced through.
if self._stateless_mod_instanciation_depends_on_proxies(mod):
return ""
idx = 0
mod_name = mod.__class__.__name__.lower()
path = f"{mod_name}_{idx}"
already_inserted = False
while hasattr(self.root, path):
if getattr(self.root, path) is mod:
already_inserted = True
break
path = f"{mod_name}_{idx}"
idx += 1
# No need to add multiple instances of the same module.
if not already_inserted:
self.root.add_module(path, mod)
return path
def path_of_module(self, mod: nn.Module) -> str:
"""
Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has
a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the
string "foo.bar".
Args:
mod (`torch.nn.Module`): The `Module` to retrieve the qualified name for.
"""
try:
return super().path_of_module(mod)
except NameError as e:
if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0:
path = self._insert_module_as_submodule(mod)
return path
raise e
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module(
m, module_qualified_name
)
@compatibility(is_backward_compatible=True)
def keys(self, obj: "Proxy") -> Any:
"""Called when a proxy object is has the keys() method called.
This is what happens when ** is called on a proxy. This should return an iterator if ** is supposed to work in
your custom tracer.
"""
attribute = HFAttribute(obj, "keys")()
if obj.node.target.startswith("**"):
return attribute._metadata
return attribute
def get_concrete_args(model: nn.Module, input_names: list[str]):
sig = inspect.signature(model.forward)
if not (set(input_names) <= set(sig.parameters.keys())):
formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names)
formatted_allowed_input_names = ", ".join(sig.parameters.keys())
raise ValueError(
f"The model does not have input(s) named: {formatted_input_names}, expected a subset of the following:"
f" {formatted_allowed_input_names}"
)
return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names}
def is_model_supported(model: "PreTrainedModel"):
return model.__class__.__name__ in _SUPPORTED_MODELS
def check_if_model_is_supported(model: "PreTrainedModel"):
if not is_model_supported(model):
supported_model_names = ", ".join(_SUPPORTED_MODELS)
raise NotImplementedError(
f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}"
)
def symbolic_trace(
model: "PreTrainedModel",
input_names: Optional[list[str]] = None,
disable_check: bool = False,
tracer_cls: type[HFTracer] = HFTracer,
) -> GraphModule:
"""
Performs symbolic tracing on the model.
Args:
model ([`PreTrainedModel`]):
The model to trace.
input_names (`list[str]`, *optional*):
The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead.
disable_check (`bool`, *optional*, defaults to `False`):
If `True`, no check is done before trying to trace the model; this is mostly useful for debugging purposes.
tracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`):
The tracer class to use for instantiating the tracer. If unset, `HFTracer` is used instead.
Returns:
`torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model.
Example:
```python
from transformers.utils.fx import symbolic_trace
traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])
```
"""
if input_names is None:
input_names = model.dummy_inputs.keys()
input_names = list(input_names)
concrete_args = get_concrete_args(model, input_names)
if not disable_check:
check_if_model_is_supported(model)
if "past_key_values" in input_names and not getattr(model.config, "use_cache", False):
logger.warning(
"`past_key_values` were specified as input names, but model.config.use_cache = False, this might lead to "
"unexpected behavior."
)
if "past_key_values" not in input_names and getattr(model.config, "use_cache", False):
logger.warning(
"`past_key_values` were not specified as input names, but model.config.use_cache = True. Setting "
"model.config.use_cache = False."
)
model.config.use_cache = False
# Tracing.
tracer = tracer_cls()
traced_graph = tracer.trace(model, concrete_args=concrete_args)
traced = torch.fx.GraphModule(model, traced_graph)
traced.config = model.config
# The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus
# _generate_dummy_input, where the model class is needed.
traced.class_for_deserialization = model.__class__
traced.device = model.device
return traced
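# --------------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). It assumes an architecture that is
# listed in `_SUPPORTED_MODELS`, e.g. BERT; any other supported model can be traced the same way.
#
#   import torch
#   from transformers import BertModel
#   from transformers.utils.fx import symbolic_trace
#
#   model = BertModel.from_pretrained("bert-base-uncased")
#   traced = symbolic_trace(model, input_names=["input_ids", "attention_mask"])
#   input_ids = torch.randint(0, model.config.vocab_size, (2, 16))
#   attention_mask = torch.ones_like(input_ids)
#   outputs = traced(input_ids=input_ids, attention_mask=attention_mask)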
| transformers/src/transformers/utils/fx.py/0 | {
"file_path": "transformers/src/transformers/utils/fx.py",
"repo_id": "transformers",
"token_count": 25678
} | 561 |
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
This folder contains a template to add a tokenization test.
## Usage
Using the `cookiecutter` utility requires having all the `dev` dependencies installed.
Let's first [fork](https://docs.github.com/en/get-started/quickstart/fork-a-repo) the `transformers` repo on GitHub. Once that's done, you can clone your fork and install `transformers` in your environment:
```shell script
git clone https://github.com/YOUR-USERNAME/transformers
cd transformers
pip install -e ".[dev]"
```
Once the installation is done, you can generate the template by running the following command. Be careful, the template will be generated inside a new folder in your current working directory.
```shell script
cookiecutter path-to-the-folder/adding_a_missing_tokenization_test/
```
You will then have to answer some questions about the tokenizer for which you want to add tests. The `modelname` should be cased according to the plain-text casing, e.g., BERT, RoBERTa, DeBERTa.
Once the command has finished, you should have one new file named `test_tokenization_Xxx.py` inside the newly created folder. At this point, the template is finished and you can move the file to the sub-folder of the corresponding model in the test folder, as in the example below.
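For example (the model name and paths below are purely illustrative), if the tests were generated for a model called `brand_new_bert`, the final step could look like this:
```shell script
mv path-to-the-new-folder/test_tokenization_brand_new_bert.py tests/models/brand_new_bert/
python -m pytest tests/models/brand_new_bert/test_tokenization_brand_new_bert.py
```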
| transformers/templates/adding_a_missing_tokenization_test/README.md/0 | {
"file_path": "transformers/templates/adding_a_missing_tokenization_test/README.md",
"repo_id": "transformers",
"token_count": 472
} | 562 |
{
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 2e8,
"contiguous_gradients": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
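For reference, a ZeRO-2 config like the one above is not used on its own; it is typically passed to the `Trainer` through the `deepspeed` field of `TrainingArguments`, which resolves the `"auto"` values from its own arguments. Below is a minimal sketch, assuming the JSON is saved as `ds_config_zero2.json`; the training hyperparameters are placeholders.

```python
from transformers import Trainer, TrainingArguments

# Illustrative only: the "auto" entries in the JSON are filled in by the Trainer
# from these arguments (batch size, optimizer hyperparameters, precision, ...).
training_args = TrainingArguments(
    output_dir="output",
    per_device_train_batch_size=8,
    learning_rate=3e-5,
    bf16=True,
    deepspeed="ds_config_zero2.json",  # path to the config shown above
)
# trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
# trainer.train()
```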
| transformers/tests/deepspeed/ds_config_zero2.json/0 | {
"file_path": "transformers/tests/deepspeed/ds_config_zero2.json",
"repo_id": "transformers",
"token_count": 678
} | 563 |
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from io import BytesIO
import numpy as np
import requests
from transformers import AriaProcessor
from transformers.models.auto.processing_auto import AutoProcessor
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from PIL import Image
@require_torch
@require_vision
class AriaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = AriaProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
processor = AriaProcessor.from_pretrained("m-ric/Aria_hf_2", size_conversion={490: 2, 980: 2})
processor.save_pretrained(cls.tmpdirname)
cls.image1 = Image.open(
BytesIO(
requests.get(
"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
).content
)
)
cls.image2 = Image.open(
BytesIO(requests.get("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg").content)
)
cls.image3 = Image.open(
BytesIO(
requests.get(
"https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
).content
)
)
cls.bos_token = "<|im_start|>"
cls.eos_token = "<|im_end|>"
cls.image_token = processor.tokenizer.image_token
cls.fake_image_token = "o"
cls.global_img_token = "<|img|>"
cls.bos_token_id = processor.tokenizer.convert_tokens_to_ids(cls.bos_token)
cls.eos_token_id = processor.tokenizer.convert_tokens_to_ids(cls.eos_token)
cls.image_token_id = processor.tokenizer.convert_tokens_to_ids(cls.image_token)
cls.fake_image_token_id = processor.tokenizer.convert_tokens_to_ids(cls.fake_image_token)
cls.global_img_tokens_id = processor.tokenizer(cls.global_img_token, add_special_tokens=False)["input_ids"]
cls.padding_token_id = processor.tokenizer.pad_token_id
cls.image_seq_len = 2
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}{% elif message['content'] is iterable %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<fim_prefix><|img|><fim_suffix>{% endif %}{% endfor %}{% endif %}<|im_end|>\n{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
"size_conversion": {490: 2, 980: 2},
} # fmt: skip
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
# Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens
def test_get_num_vision_tokens(self):
"Tests general functionality of the helper used internally in vLLM"
processor = self.get_processor()
output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
self.assertTrue("num_image_tokens" in output)
self.assertEqual(len(output["num_image_tokens"]), 3)
self.assertTrue("num_image_patches" in output)
self.assertEqual(len(output["num_image_patches"]), 3)
def test_process_interleaved_images_prompts_image_splitting(self):
processor = self.get_processor()
processor.image_processor.split_image = True
# Test that a single image is processed correctly
inputs = processor(images=self.image1, text="Ok<|img|>", images_kwargs={"split_image": True})
self.assertEqual(np.array(inputs["pixel_values"]).shape, (2, 3, 980, 980))
self.assertEqual(np.array(inputs["pixel_mask"]).shape, (2, 980, 980))
def test_process_interleaved_images_prompts_no_image_splitting(self):
processor = self.get_processor()
processor.image_processor.split_image = False
# Test that a single image is processed correctly
inputs = processor(images=self.image1, text="Ok<|img|>")
image1_expected_size = (980, 980)
self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, 3, *image1_expected_size))
self.assertEqual(np.array(inputs["pixel_mask"]).shape, (1, *image1_expected_size))
# fmt: on
# Test a single sample with image and text
image_str = "<|img|>"
text_str = "In this image, we see"
text = image_str + text_str
inputs = processor(text=text, images=self.image1)
# fmt: off
tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False)
expected_input_ids = [[self.image_token_id] * self.image_seq_len + tokenized_sentence["input_ids"]]
# self.assertEqual(len(inputs["input_ids"]), len(expected_input_ids))
self.assertEqual(inputs["input_ids"], expected_input_ids)
self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])])
self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, 3, *image1_expected_size))
self.assertEqual(np.array(inputs["pixel_mask"]).shape, (1, *image1_expected_size))
# fmt: on
# Test that batch is correctly processed
image_str = "<|img|>"
text_str_1 = "In this image, we see"
text_str_2 = "In this image, we see"
text = [
image_str + text_str_1,
image_str + image_str + text_str_2,
]
images = [[self.image1], [self.image2, self.image3]]
inputs = processor(text=text, images=images, padding=True)
# fmt: off
tokenized_sentence_1 = processor.tokenizer(text_str_1, add_special_tokens=False)
tokenized_sentence_2 = processor.tokenizer(text_str_2, add_special_tokens=False)
image_tokens = [self.image_token_id] * self.image_seq_len
expected_input_ids_1 = image_tokens + tokenized_sentence_1["input_ids"]
expected_input_ids_2 = 2 * image_tokens + tokenized_sentence_2["input_ids"]
# Pad the first input to match the second input
pad_len = len(expected_input_ids_2) - len(expected_input_ids_1)
expected_attention_mask = [[0] * pad_len + [1] * len(expected_input_ids_1), [1] * (len(expected_input_ids_2))]
self.assertEqual(
inputs["attention_mask"],
expected_attention_mask
)
self.assertEqual(np.array(inputs['pixel_values']).shape, (3, 3, 980, 980))
self.assertEqual(np.array(inputs['pixel_mask']).shape, (3, 980, 980))
# fmt: on
def test_non_nested_images_with_batched_text(self):
processor = self.get_processor()
processor.image_processor.do_image_splitting = False
image_str = "<|img|>"
text_str_1 = "In this image, we see"
text_str_2 = "In this image, we see"
text = [
image_str + text_str_1,
image_str + image_str + text_str_2,
]
images = [self.image1, self.image2, self.image3]
inputs = processor(text=text, images=images, padding=True)
self.assertEqual(np.array(inputs["pixel_values"]).shape, (3, 3, 980, 980))
self.assertEqual(np.array(inputs["pixel_mask"]).shape, (3, 980, 980))
def test_apply_chat_template(self):
# Message contains content which is a mix of lists with images, image urls, and strings
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "What do these images show?"},
{"type": "image"},
{"type": "image"},
"What do these images show?",
],
},
{
"role": "assistant",
"content": [
{
"type": "text",
"text": "The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.",
}
],
},
{"role": "user", "content": [{"type": "text", "text": "And who is that?"}]},
]
processor = self.get_processor()
# Make short sequence length to test that the fake tokens are added correctly
rendered = processor.apply_chat_template(messages, add_generation_prompt=True)
print(rendered)
expected_rendered = """<|im_start|>user
What do these images show?<fim_prefix><|img|><fim_suffix><fim_prefix><|img|><fim_suffix><|im_end|>
<|im_start|>assistant
The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.<|im_end|>
<|im_start|>user
And who is that?<|im_end|>
<|im_start|>assistant
"""
self.assertEqual(rendered, expected_rendered)
def test_image_chat_template_accepts_processing_kwargs(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
messages = [
[
{
"role": "user",
"content": [
{"type": "text", "text": "What is shown in this image?"},
],
},
]
]
formatted_prompt_tokenized = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
padding="max_length",
max_length=50,
)
self.assertEqual(len(formatted_prompt_tokenized[0]), 50)
formatted_prompt_tokenized = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
truncation=True,
max_length=5,
)
self.assertEqual(len(formatted_prompt_tokenized[0]), 5)
# Now test the ability to return dict
messages[0][0]["content"].append(
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}
)
out_dict = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
max_image_size=980,
return_tensors="np",
)
self.assertListEqual(list(out_dict[self.images_input_name].shape), [1, 3, 980, 980])
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=3,
)
| transformers/tests/models/aria/test_processing_aria.py/0 | {
"file_path": "transformers/tests/models/aria/test_processing_aria.py",
"repo_id": "transformers",
"token_count": 5662
} | 564 |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from transformers import AutoProcessor, AutoTokenizer, AyaVisionProcessor
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_torch_available():
import torch
if is_vision_available():
from transformers import GotOcr2ImageProcessor
@require_vision
class AyaVisionProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = AyaVisionProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = GotOcr2ImageProcessor(
do_resize=True,
size={"height": 20, "width": 20},
max_patches=2,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.485, 0.456, 0.406],
image_std=[0.229, 0.224, 0.225],
do_convert_rgb=True,
)
tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/namespace-CohereForAI-repo_name_aya-vision-8b", padding_side="left"
)
processor_kwargs = cls.prepare_processor_dict()
processor = AyaVisionProcessor.from_pretrained(
"hf-internal-testing/namespace-CohereForAI-repo_name_aya-vision-8b",
image_processor=image_processor,
tokenizer=tokenizer,
**processor_kwargs,
)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
@staticmethod
def prepare_processor_dict():
return {"patch_size": 10, "img_size": 20}
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
# Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens
def test_get_num_vision_tokens(self):
"Tests general functionality of the helper used internally in vLLM"
processor = self.get_processor()
output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
self.assertTrue("num_image_tokens" in output)
self.assertEqual(len(output["num_image_tokens"]), 3)
self.assertTrue("num_image_patches" in output)
self.assertEqual(len(output["num_image_patches"]), 3)
@require_torch
def test_process_interleaved_images_videos(self):
processor = self.get_processor()
messages = [
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
{
"type": "image",
"url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg",
},
{"type": "text", "text": "What are the differences between these two images?"},
],
},
],
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://llava-vl.github.io/static/images/view.jpg",
},
{"type": "text", "text": "Write a haiku for this image"},
],
}
],
]
inputs_batched = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
)
# Process non-batched inputs to check that the pixel_values and input_ids are reconstructed in the correct order when batched together
images_patches_index = 0
for i, message in enumerate(messages):
inputs = processor.apply_chat_template(
message,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
)
# We slice with [-inputs["input_ids"].shape[1] :] as the input_ids are left padded
torch.testing.assert_close(
inputs["input_ids"][0], inputs_batched["input_ids"][i][-inputs["input_ids"].shape[1] :]
)
torch.testing.assert_close(
inputs["pixel_values"],
inputs_batched["pixel_values"][
images_patches_index : images_patches_index + inputs["pixel_values"].shape[0]
],
)
images_patches_index += inputs["pixel_values"].shape[0]
| transformers/tests/models/aya_vision/test_processing_aya_vision.py/0 | {
"file_path": "transformers/tests/models/aya_vision/test_processing_aya_vision.py",
"repo_id": "transformers",
"token_count": 2868
} | 565 |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch BioGPT model."""
import math
import unittest
from transformers import BioGptConfig, is_sacremoses_available, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
class BioGptModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return BioGptConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, token_type_ids, *args):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, token_type_ids, *args):
model = BioGptModel(config=config).to(torch_device).eval()
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and next_attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, token_type_ids, *args, gradient_checkpointing=False
):
model = BioGptForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_biogpt_weight_initialization(self, config, *args):
model = BioGptModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, token_type_ids, *args):
config.num_labels = self.num_labels
model = BioGptForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available() and is_sacremoses_available()
else {}
)
test_pruning = False
def setUp(self):
self.model_tester = BioGptModelTester(self)
self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_biogpt_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
def test_biogpt_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def test_biogpt_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
def test_biogpt_weight_initialization(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
def test_biogpt_token_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def test_batch_generation(self):
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(torch_device)
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
tokenizer.padding_side = "left"
# Define PAD Token = EOS Token
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
model.generation_config.pad_token_id = model.generation_config.eos_token_id
# use different length sentences to test batching
sentences = [
"Hello, my dog is a little",
"Today, I",
]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
input_ids = inputs["input_ids"].to(torch_device)
outputs = model.generate(
input_ids=input_ids,
attention_mask=inputs["attention_mask"].to(torch_device),
max_new_tokens=10,
)
inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=10)
num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().item()
inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
expected_output_sentence = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/biogpt"
model = BioGptModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# Copied from tests.models.opt.test_modeling_opt.OPTModelTest.test_opt_sequence_classification_model with OPT->BioGpt,opt->biogpt,prepare_config_and_inputs->prepare_config_and_inputs_for_common
def test_biogpt_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
# Copied from tests.models.opt.test_modeling_opt.OPTModelTest.test_opt_sequence_classification_model_for_multi_label with OPT->BioGpt,opt->biogpt,prepare_config_and_inputs->prepare_config_and_inputs_for_common
def test_biogpt_sequence_classification_model_for_multi_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "multi_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
).to(torch.float)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_lm_head_model(self):
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
output = model(input_ids)[0]
vocab_size = 42384
expected_shape = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_biogpt_generation_beam_search(self):
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(torch_device)
torch.manual_seed(0)
tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
output_ids = model.generate(
**tokenized,
min_length=100,
max_length=1024,
num_beams=5,
early_stopping=True,
)
output_str = tokenizer.decode(output_ids[0])
EXPECTED_OUTPUT_STR = (
"</s>"
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths. "
"</s>"
)
self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| transformers/tests/models/biogpt/test_modeling_biogpt.py/0 | {
"file_path": "transformers/tests/models/biogpt/test_modeling_biogpt.py",
"repo_id": "transformers",
"token_count": 8401
} | 566 |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Blip model."""
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import BlipTextModel
class BlipTextModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
bos_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = BlipTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BlipTextModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (BlipTextModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = BlipTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="Blip does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "Salesforce/blip-vqa-base"
model = BlipTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| transformers/tests/models/blip/test_modeling_blip_text.py/0 | {
"file_path": "transformers/tests/models/blip/test_modeling_blip_text.py",
"repo_id": "transformers",
"token_count": 2555
} | 567 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch CLAP model."""
import inspect
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import ClapAudioConfig, ClapConfig, ClapProcessor, ClapTextConfig
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapTextModel,
ClapTextModelWithProjection,
)
class ClapAudioModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=60,
num_mel_bins=16,
window_size=4,
spec_size=64,
patch_size=2,
patch_stride=2,
seq_length=16,
freq_ratio=2,
num_channels=3,
is_training=True,
hidden_size=32,
patch_embeds_hidden_size=16,
projection_dim=32,
depths=[2, 2],
num_hidden_layers=2,
num_heads=[2, 2],
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_mel_bins = num_mel_bins
self.window_size = window_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.depths = depths
self.num_heads = num_heads
self.num_attention_heads = num_heads[0]
self.seq_length = seq_length
self.spec_size = spec_size
self.freq_ratio = freq_ratio
self.patch_stride = patch_stride
self.patch_embeds_hidden_size = patch_embeds_hidden_size
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_features = floats_tensor([self.batch_size, 1, self.hidden_size, self.num_mel_bins])
config = self.get_config()
return config, input_features
def get_config(self):
return ClapAudioConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_mel_bins=self.num_mel_bins,
window_size=self.window_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
patch_stride=self.patch_stride,
projection_dim=self.projection_dim,
depths=self.depths,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
spec_size=self.spec_size,
freq_ratio=self.freq_ratio,
patch_embeds_hidden_size=self.patch_embeds_hidden_size,
)
def create_and_check_model(self, config, input_features):
model = ClapAudioModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_features)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_projection(self, config, input_features):
model = ClapAudioModelWithProjection(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_features)
self.parent.assertEqual(result.audio_embeds.shape, (self.batch_size, self.projection_dim))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_features = config_and_inputs
inputs_dict = {"input_features": input_features}
return config, inputs_dict
@require_torch
class ClapAudioModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as CLAP does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (ClapAudioModel, ClapAudioModelWithProjection) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = ClapAudioModelTester(self)
self.config_tester = ConfigTester(self, config_class=ClapAudioConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="ClapAudioModel does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[2 * self.model_tester.patch_embeds_hidden_size, 2 * self.model_tester.patch_embeds_hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
def test_retain_grad_hidden_states_attentions(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_features"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_with_projection(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_projection(*config_and_inputs)
@unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
def test_training(self):
pass
@unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "laion/clap-htsat-fused"
model = ClapAudioModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
def test_model_with_projection_from_pretrained(self):
model_name = "laion/clap-htsat-fused"
model = ClapAudioModelWithProjection.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertTrue(hasattr(model, "audio_projection"))
class ClapTextModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
projection_hidden_act="relu",
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.projection_hidden_act = projection_hidden_act
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return ClapTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
projection_hidden_act=self.projection_hidden_act,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = ClapTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_projection(self, config, input_ids, input_mask):
model = ClapTextModelWithProjection(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class ClapTextModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ClapTextModel, ClapTextModelWithProjection) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = ClapTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=ClapTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_with_projection(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_projection(*config_and_inputs)
@unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass")
def test_training(self):
pass
@unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="ClapTextModel does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "laion/clap-htsat-fused"
model = ClapTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
def test_model_with_projection_from_pretrained(self):
model_name = "laion/clap-htsat-fused"
model = ClapTextModelWithProjection.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertTrue(hasattr(model, "text_projection"))
class ClapModelTester:
def __init__(self, parent, text_kwargs=None, audio_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if audio_kwargs is None:
audio_kwargs = {}
self.parent = parent
self.text_model_tester = ClapTextModelTester(parent, **text_kwargs)
self.audio_model_tester = ClapAudioModelTester(parent, **audio_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.is_training = is_training
def prepare_config_and_inputs(self):
_, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
_, input_features = self.audio_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, input_features
def get_config(self):
return ClapConfig(
text_config=self.text_model_tester.get_config().to_dict(),
audio_config=self.audio_model_tester.get_config().to_dict(),
projection_dim=64,
)
def create_and_check_model(self, config, input_ids, attention_mask, input_features):
model = ClapModel(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids, input_features, attention_mask)
self.parent.assertEqual(
result.logits_per_audio.shape, (self.audio_model_tester.batch_size, self.text_model_tester.batch_size)
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.audio_model_tester.batch_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, input_features = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"input_features": input_features,
"return_loss": True,
}
return config, inputs_dict
@require_torch
class ClapModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (ClapModel,) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": ClapModel} if is_torch_available() else {}
fx_compatible = False
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
def setUp(self):
self.model_tester = ClapModelTester(self)
common_properties = ["logit_scale_init_value", "projection_hidden_act", "projection_dim"]
self.config_tester = ConfigTester(
self, config_class=ClapConfig, has_text_modality=False, common_properties=common_properties
)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="ClapModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
# override as the `logit_scale` parameter initialization is different for CLAP
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initialized as per the original implementation
if "logit_scale" in name:
self.assertAlmostEqual(
param.data.item(),
np.log(1 / 0.07),
delta=1e-3,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
self.skipTest(reason="test_torchscript is set to False")
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
try:
input_ids = inputs_dict["input_ids"]
input_features = inputs_dict["input_features"] # CLAP needs input_features
traced_model = torch.jit.trace(model, (input_ids, input_features))
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
non_persistent_buffers = {}
for key in loaded_model_state_dict:
if key not in model_state_dict:
non_persistent_buffers[key] = loaded_model_state_dict[key]
loaded_model_state_dict = {
key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
}
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
model_buffers = list(model.buffers())
for non_persistent_buffer in non_persistent_buffers.values():
found_buffer = False
for i, model_buffer in enumerate(model_buffers):
if torch.equal(non_persistent_buffer, model_buffer):
found_buffer = True
break
self.assertTrue(found_buffer)
model_buffers.pop(i)
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_load_audio_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save ClapConfig and check if we can load ClapAudioConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
audio_config = ClapAudioConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.audio_config.to_dict(), audio_config.to_dict())
# Save ClapConfig and check if we can load ClapTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = ClapTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
@slow
def test_model_from_pretrained(self):
model_name = "laion/clap-htsat-fused"
model = ClapModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch
class ClapModelIntegrationTest(unittest.TestCase):
paddings = ["repeatpad", "repeat", "pad"]
def test_integration_unfused(self):
EXPECTED_MEANS_UNFUSED = {
"repeatpad": 0.0024,
"pad": 0.0020,
"repeat": 0.0023,
}
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = librispeech_dummy[-1]
model_id = "laion/clap-htsat-unfused"
model = ClapModel.from_pretrained(model_id).to(torch_device)
processor = ClapProcessor.from_pretrained(model_id)
for padding in self.paddings:
inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt", padding=padding).to(
torch_device
)
audio_embed = model.get_audio_features(**inputs)
expected_mean = EXPECTED_MEANS_UNFUSED[padding]
self.assertTrue(
torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
)
def test_integration_fused(self):
EXPECTED_MEANS_FUSED = {
"repeatpad": 0.00069,
"repeat": 0.00196,
"pad": -0.000379,
}
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = librispeech_dummy[-1]
model_id = "laion/clap-htsat-fused"
model = ClapModel.from_pretrained(model_id).to(torch_device)
processor = ClapProcessor.from_pretrained(model_id)
for padding in self.paddings:
inputs = processor(
audios=audio_sample["audio"]["array"], return_tensors="pt", padding=padding, truncation="fusion"
).to(torch_device)
audio_embed = model.get_audio_features(**inputs)
expected_mean = EXPECTED_MEANS_FUSED[padding]
self.assertTrue(
torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
)
def test_batched_fused(self):
EXPECTED_MEANS_FUSED = {
"repeatpad": 0.0010,
"repeat": 0.0020,
"pad": 0.0006,
}
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_samples = [sample["array"] for sample in librispeech_dummy[0:4]["audio"]]
model_id = "laion/clap-htsat-fused"
model = ClapModel.from_pretrained(model_id).to(torch_device)
processor = ClapProcessor.from_pretrained(model_id)
for padding in self.paddings:
inputs = processor(audios=audio_samples, return_tensors="pt", padding=padding, truncation="fusion").to(
torch_device
)
audio_embed = model.get_audio_features(**inputs)
expected_mean = EXPECTED_MEANS_FUSED[padding]
self.assertTrue(
torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
)
def test_batched_unfused(self):
        EXPECTED_MEANS_UNFUSED = {
"repeatpad": 0.0016,
"repeat": 0.0019,
"pad": 0.0019,
}
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_samples = [sample["array"] for sample in librispeech_dummy[0:4]["audio"]]
model_id = "laion/clap-htsat-unfused"
model = ClapModel.from_pretrained(model_id).to(torch_device)
processor = ClapProcessor.from_pretrained(model_id)
for padding in self.paddings:
inputs = processor(audios=audio_samples, return_tensors="pt", padding=padding).to(torch_device)
audio_embed = model.get_audio_features(**inputs)
            expected_mean = EXPECTED_MEANS_UNFUSED[padding]
self.assertTrue(
torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
)
| transformers/tests/models/clap/test_modeling_clap.py/0 | {
"file_path": "transformers/tests/models/clap/test_modeling_clap.py",
"repo_id": "transformers",
"token_count": 13086
} | 568 |
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch CPMAnt model."""
import unittest
from transformers.testing_utils import is_torch_available, require_torch, tooslow
from ...generation.test_utils import torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CpmAntConfig,
CpmAntForCausalLM,
CpmAntModel,
CpmAntTokenizer,
)
@require_torch
class CpmAntModelTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=8,
is_training=True,
use_token_type_ids=False,
use_input_mask=False,
use_labels=False,
use_mc_token_ids=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
num_buckets=32,
max_distance=128,
prompt_length=8,
prompt_types=8,
segment_types=8,
init_std=0.02,
return_dict=True,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.num_buckets = num_buckets
self.max_distance = max_distance
self.prompt_length = prompt_length
self.prompt_types = prompt_types
self.segment_types = segment_types
self.init_std = init_std
self.return_dict = return_dict
def prepare_config_and_inputs(self):
input_ids = {}
input_ids["input_ids"] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).type(torch.int32)
input_ids["use_cache"] = False
config = self.get_config()
return (config, input_ids)
def get_config(self):
return CpmAntConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
dim_ff=self.intermediate_size,
position_bias_num_buckets=self.num_buckets,
position_bias_max_distance=self.max_distance,
prompt_types=self.prompt_types,
prompt_length=self.prompt_length,
segment_types=self.segment_types,
use_cache=True,
init_std=self.init_std,
return_dict=self.return_dict,
)
def create_and_check_cpmant_model(self, config, input_ids, *args):
model = CpmAntModel(config=config)
model.to(torch_device)
model.eval()
hidden_states = model(**input_ids).last_hidden_state
self.parent.assertEqual(hidden_states.shape, (self.batch_size, self.seq_length, config.hidden_size))
def create_and_check_lm_head_model(self, config, input_ids, *args):
model = CpmAntForCausalLM(config)
model.to(torch_device)
input_ids["input_ids"] = input_ids["input_ids"].to(torch_device)
model.eval()
model_output = model(**input_ids)
self.parent.assertEqual(
model_output.logits.shape,
(self.batch_size, self.seq_length, config.vocab_size + config.prompt_types * config.prompt_length),
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
@require_torch
class CpmAntModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (CpmAntModel, CpmAntForCausalLM) if is_torch_available() else ()
# Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante
all_generative_model_classes = ()
pipeline_model_mapping = (
{"feature-extraction": CpmAntModel, "text-generation": CpmAntForCausalLM} if is_torch_available() else {}
)
test_pruning = False
test_missing_keys = False
test_mismatched_shapes = False
test_head_masking = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = CpmAntModelTester(self)
self.config_tester = ConfigTester(self, config_class=CpmAntConfig)
def test_config(self):
self.config_tester.run_common_tests()
    @unittest.skip(reason="CPMAnt doesn't support inputs_embeds.")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(
        reason="CPMAnt doesn't support retaining gradients on hidden_states or attentions, because prompt "
        "management peels output.hidden_states (and likewise attentions) off the graph. We strongly recommend "
        "using the loss to tune the model."
    )
    def test_retain_grad_hidden_states_attentions(self):
        pass
def test_cpmant_model(self):
config, inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_cpmant_model(config, inputs)
def test_cpmant_lm_head_model(self):
config, inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(config, inputs)
@require_torch
class CpmAntModelIntegrationTest(unittest.TestCase):
@tooslow
def test_inference_masked_lm(self):
texts = "今天天气真好!"
model_path = "openbmb/cpm-ant-10b"
model = CpmAntModel.from_pretrained(model_path)
tokenizer = CpmAntTokenizer.from_pretrained(model_path)
inputs = tokenizer(texts, return_tensors="pt")
hidden_states = model(**inputs).last_hidden_state
expected_slice = torch.tensor(
[[[6.1708, 5.9244, 1.0835], [6.5207, 6.2893, -11.3324], [-1.0107, -0.0576, -5.9577]]],
)
torch.testing.assert_close(hidden_states[:, :3, :3], expected_slice, rtol=1e-2, atol=1e-2)
@require_torch
class CpmAntForCausalLMlIntegrationTest(unittest.TestCase):
@tooslow
def test_inference_casual(self):
texts = "今天天气真好!"
model_path = "openbmb/cpm-ant-10b"
model = CpmAntForCausalLM.from_pretrained(model_path)
tokenizer = CpmAntTokenizer.from_pretrained(model_path)
inputs = tokenizer(texts, return_tensors="pt")
hidden_states = model(**inputs).logits
expected_slice = torch.tensor(
[[[-6.4267, -6.4083, -6.3958], [-5.8802, -5.9447, -5.7811], [-5.3896, -5.4820, -5.4295]]],
)
torch.testing.assert_close(hidden_states[:, :3, :3], expected_slice, rtol=1e-2, atol=1e-2)
@tooslow
def test_simple_generation(self):
model_path = "openbmb/cpm-ant-10b"
model = CpmAntForCausalLM.from_pretrained(model_path)
tokenizer = CpmAntTokenizer.from_pretrained(model_path)
texts = "今天天气不错,"
expected_output = "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的"
model_inputs = tokenizer(texts, return_tensors="pt")
token_ids = model.generate(**model_inputs)
output_texts = tokenizer.batch_decode(token_ids)
        self.assertEqual(expected_output, output_texts[0])
@tooslow
def test_batch_generation(self):
model_path = "openbmb/cpm-ant-10b"
model = CpmAntForCausalLM.from_pretrained(model_path)
tokenizer = CpmAntTokenizer.from_pretrained(model_path)
texts = ["今天天气不错,", "新年快乐,万事如意!"]
expected_output = [
"今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的",
"新年快乐,万事如意!在这辞旧迎新的美好时刻,我谨代表《农村新技术》杂志社全体同仁,向一直以来关心、支持《农村新技术》杂志发展的各级领导、各界朋友和广大读者致以最诚挚的",
]
model_inputs = tokenizer(texts, return_tensors="pt", padding=True)
token_ids = model.generate(**model_inputs)
output_texts = tokenizer.batch_decode(token_ids)
self.assertEqual(expected_output, output_texts)
| transformers/tests/models/cpmant/test_modeling_cpmant.py/0 | {
"file_path": "transformers/tests/models/cpmant/test_modeling_cpmant.py",
"repo_id": "transformers",
"token_count": 4360
} | 569 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Dac model."""
import inspect
import os
import tempfile
import unittest
import numpy as np
from datasets import Audio, load_dataset
from parameterized import parameterized
from transformers import AutoProcessor, DacConfig, DacModel
from transformers.testing_utils import is_torch_available, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
@require_torch
# Copied from transformers.tests.encodec.test_modeling_encodec.EncodecModelTester with Encodec->Dac
class DacModelTester:
# Ignore copy
def __init__(
self,
parent,
batch_size=3,
num_channels=1,
is_training=False,
intermediate_size=1024,
encoder_hidden_size=16,
downsampling_ratios=[2, 4, 4],
decoder_hidden_size=16,
n_codebooks=6,
codebook_size=512,
codebook_dim=4,
quantizer_dropout=0.0,
commitment_loss_weight=0.25,
codebook_loss_weight=1.0,
sample_rate=16000,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.is_training = is_training
self.intermediate_size = intermediate_size
self.sample_rate = sample_rate
self.encoder_hidden_size = encoder_hidden_size
self.downsampling_ratios = downsampling_ratios
self.decoder_hidden_size = decoder_hidden_size
self.n_codebooks = n_codebooks
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim
self.quantizer_dropout = quantizer_dropout
self.commitment_loss_weight = commitment_loss_weight
self.codebook_loss_weight = codebook_loss_weight
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0)
config = self.get_config()
inputs_dict = {"input_values": input_values}
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def prepare_config_and_inputs_for_model_class(self, model_class):
input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0)
config = self.get_config()
inputs_dict = {"input_values": input_values}
return config, inputs_dict
# Ignore copy
def get_config(self):
return DacConfig(
encoder_hidden_size=self.encoder_hidden_size,
downsampling_ratios=self.downsampling_ratios,
decoder_hidden_size=self.decoder_hidden_size,
n_codebooks=self.n_codebooks,
codebook_size=self.codebook_size,
codebook_dim=self.codebook_dim,
quantizer_dropout=self.quantizer_dropout,
commitment_loss_weight=self.commitment_loss_weight,
codebook_loss_weight=self.codebook_loss_weight,
)
# Ignore copy
def create_and_check_model_forward(self, config, inputs_dict):
model = DacModel(config=config).to(torch_device).eval()
input_values = inputs_dict["input_values"]
result = model(input_values)
self.parent.assertEqual(result.audio_values.shape, (self.batch_size, self.intermediate_size))
@require_torch
# Copied from transformers.tests.encodec.test_modeling_encodec.EncodecModelTest with Encodec->Dac
class DacModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (DacModel,) if is_torch_available() else ()
is_encoder_decoder = True
test_pruning = False
test_headmasking = False
test_resize_embeddings = False
pipeline_model_mapping = {"feature-extraction": DacModel} if is_torch_available() else {}
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
# model does not have attention and does not support returning hidden states
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if "output_attentions" in inputs_dict:
inputs_dict.pop("output_attentions")
if "output_hidden_states" in inputs_dict:
inputs_dict.pop("output_hidden_states")
return inputs_dict
def setUp(self):
self.model_tester = DacModelTester(self)
self.config_tester = ConfigTester(
self, config_class=DacConfig, hidden_size=37, common_properties=[], has_text_modality=False
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs)
# TODO (ydshieh): Although we have a potential cause, it's still strange that this test fails all the time with large differences
@unittest.skip(reason="Might be caused by `indices` computed with `max()` in `decode_latents`")
def test_batching_equivalence(self):
super().test_batching_equivalence()
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
# Ignore copy
expected_arg_names = ["input_values", "n_quantizers", "return_dict"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@unittest.skip("The DacModel is not transformers based, thus it does not have `inputs_embeds` logics")
def test_inputs_embeds(self):
pass
@unittest.skip("The DacModel is not transformers based, thus it does not have `inputs_embeds` logics")
def test_model_get_set_embeddings(self):
pass
@unittest.skip("The DacModel is not transformers based, thus it does not have the usual `attention` logic")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip("The DacModel is not transformers based, thus it does not have the usual `attention` logic")
def test_torchscript_output_attentions(self):
pass
@unittest.skip("The DacModel is not transformers based, thus it does not have the usual `hidden_states` logic")
def test_torchscript_output_hidden_state(self):
pass
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
main_input_name = model_class.main_input_name
try:
main_input = inputs[main_input_name]
model(main_input)
traced_model = torch.jit.trace(model, main_input)
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
non_persistent_buffers = {}
for key in loaded_model_state_dict:
if key not in model_state_dict:
non_persistent_buffers[key] = loaded_model_state_dict[key]
loaded_model_state_dict = {
key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
}
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
model_buffers = list(model.buffers())
for non_persistent_buffer in non_persistent_buffers.values():
found_buffer = False
for i, model_buffer in enumerate(model_buffers):
if torch.equal(non_persistent_buffer, model_buffer):
found_buffer = True
break
self.assertTrue(found_buffer)
model_buffers.pop(i)
models_equal = True
for layer_name, p1 in model_state_dict.items():
if layer_name in loaded_model_state_dict:
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
# Avoid memory leak. Without this, each call increase RAM usage by ~20MB.
# (Even with this call, there are still memory leak by ~0.04MB)
self.clear_torch_jit_class_registry()
@unittest.skip("The DacModel is not transformers based, thus it does not have the usual `attention` logic")
def test_attention_outputs(self):
pass
@unittest.skip("The DacModel is not transformers based, thus it does not have the usual `hidden_states` logic")
def test_hidden_states_output(self):
pass
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_determinism(first, second):
            # outputs are not tensors but lists (since the sequences don't all have the same frame_length)
out_1 = first.cpu().numpy()
out_2 = second.cpu().numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
if isinstance(first, tuple) and isinstance(second, tuple):
for tensor1, tensor2 in zip(first, second):
check_determinism(tensor1, tensor2)
else:
check_determinism(first, second)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (list, tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
# Ignore copy
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
uniform_init_parms = ["conv", "in_proj", "out_proj", "codebook"]
if param.requires_grad:
if any(x in name for x in uniform_init_parms):
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def test_identity_shortcut(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.use_conv_shortcut = False
self.model_tester.create_and_check_model_forward(config, inputs_dict)
# Copied from transformers.tests.encodec.test_modeling_encodec.normalize
def normalize(arr):
norm = np.linalg.norm(arr)
normalized_arr = arr / norm
return normalized_arr
# Copied from transformers.tests.encodec.test_modeling_encodec.compute_rmse
def compute_rmse(arr1, arr2):
arr1_np = arr1.cpu().numpy().squeeze()
arr2_np = arr2.cpu().numpy().squeeze()
max_length = min(arr1.shape[-1], arr2.shape[-1])
arr1_np = arr1_np[..., :max_length]
arr2_np = arr2_np[..., :max_length]
arr1_normalized = normalize(arr1_np)
arr2_normalized = normalize(arr2_np)
return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean())
"""
Integration tests for DAC.
Code for reproducing expected outputs can be found here:
- test_integration: https://gist.github.com/ebezzam/bb315efa7a416db6336a6b2a2d424ffa#file-test_dac-py
- test_batch: https://gist.github.com/ebezzam/bb315efa7a416db6336a6b2a2d424ffa#file-test_dac_batch-py
See https://github.com/huggingface/transformers/pull/39313 for the reasoning behind the large tolerance (1e-3)
between encoder and decoder outputs. In summary, the original model uses weight normalization while the Transformers
port does not, which leads to accumulated error. However, this does not affect the quantizer codes, since the
discretization is robust to precision errors. The codec error is also similar between the Transformers and original
implementations.
Additionally, here is a script to debug outputs and weights layer by layer:
https://gist.github.com/ebezzam/bb315efa7a416db6336a6b2a2d424ffa#file-dac_layer_by_layer_debugging-py
"""
# fmt: off
# -- test_integration
EXPECTED_PREPROC_SHAPE = {
"dac_16khz": torch.tensor([1, 1, 93760]),
"dac_24khz": torch.tensor([1, 1, 140800]),
"dac_44khz": torch.tensor([1, 1, 258560]),
}
EXPECTED_ENC_LOSS = {
"dac_16khz": 24.8767,
"dac_24khz": 27.6831,
"dac_44khz": 23.8870,
}
EXPECTED_QUANT_CODES = {
"dac_16khz": torch.tensor(
[
[
[804, 25, 536, 52, 68, 867, 388, 653, 315, 706, 301, 305, 752, 25, 40],
[955, 955, 134, 601, 431, 375, 967, 56, 54, 261, 871, 552, 232, 341, 228],
[977, 701, 172, 927, 617, 765, 790, 149, 403, 707, 511, 226, 254, 883, 644],
[467, 85, 828, 54, 211, 1007, 906, 253, 677, 1007, 302, 577, 644, 330, 778],
[189, 865, 586, 321, 116, 357, 911, 865, 1000, 234, 6, 901, 6, 470, 895],
[454, 241, 67, 622, 487, 426, 749, 833, 382, 900, 372, 959, 622, 305, 964],
[175, 609, 730, 307, 961, 609, 318, 1011, 386, 949, 343, 899, 657, 609, 38],
[82, 92, 692, 83, 131, 866, 483, 362, 519, 531, 853, 121, 404, 224, 710],
[1003, 260, 431, 460, 827, 927, 81, 76, 629, 298, 168, 177, 466, 741, 762],
[196, 203, 594, 394, 198, 560, 952, 437, 222, 992, 934, 316, 497, 31, 538],
[129, 715, 393, 635, 246, 716, 908, 384, 962, 873, 92, 254, 592, 496, 83],
[257, 502, 606, 204, 993, 428, 176, 395, 901, 323, 342, 849, 226, 453, 513],
]
]
).to(torch_device),
"dac_24khz": torch.tensor(
[
[
[252, 851, 919, 204, 239, 360, 160, 103, 851, 876, 160, 793, 103, 234, 665],
[908, 658, 479, 556, 847, 738, 395, 124, 847, 496, 623, 77, 9, 497, 117],
[385, 278, 221, 1020, 408, 330, 70, 215, 80, 84, 320, 998, 931, 470, 944],
[383, 259, 271, 348, 179, 304, 634, 282, 788, 542, 356, 760, 297, 903, 623],
[487, 159, 414, 947, 608, 685, 101, 74, 372, 823, 417, 866, 671, 589, 901],
[692, 175, 508, 54, 85, 383, 787, 629, 844, 7, 511, 382, 383, 643, 134],
[652, 895, 846, 766, 326, 640, 852, 365, 887, 126, 216, 224, 568, 1008, 635],
[938, 285, 570, 515, 574, 515, 862, 644, 845, 207, 603, 830, 193, 158, 79],
[847, 825, 874, 991, 384, 509, 1008, 308, 579, 487, 976, 651, 932, 692, 860],
[220, 392, 307, 397, 705, 876, 273, 438, 411, 449, 573, 393, 543, 709, 303],
[394, 773, 144, 254, 832, 586, 790, 941, 501, 502, 351, 907, 915, 148, 141],
[447, 985, 930, 175, 196, 854, 968, 494, 899, 637, 136, 937, 395, 364, 1000],
[677, 690, 428, 756, 471, 225, 763, 124, 333, 23, 821, 644, 635, 130, 475],
[932, 589, 436, 548, 555, 53, 466, 280, 598, 689, 400, 194, 73, 619, 450],
[592, 402, 177, 731, 693, 618, 871, 177, 761, 222, 927, 986, 676, 197, 658],
[192, 560, 368, 729, 626, 656, 174, 271, 383, 345, 381, 567, 467, 970, 794],
[834, 92, 990, 380, 146, 286, 644, 929, 173, 292, 1008, 948, 281, 973, 366],
[892, 533, 350, 589, 355, 163, 561, 229, 655, 240, 316, 926, 385, 624, 178],
[36, 385, 589, 342, 143, 517, 648, 94, 457, 217, 892, 60, 355, 46, 253],
[934, 939, 457, 5, 668, 323, 312, 825, 448, 697, 374, 199, 98, 955, 884],
[567, 297, 40, 498, 313, 86, 832, 270, 21, 609, 200, 688, 168, 616, 706],
[178, 559, 922, 627, 651, 19, 589, 475, 312, 898, 508, 969, 36, 783, 64],
[169, 981, 86, 4, 598, 988, 670, 480, 68, 235, 873, 130, 479, 543, 669],
[981, 575, 827, 149, 224, 572, 470, 265, 504, 654, 586, 835, 444, 497, 198],
[856, 913, 658, 664, 883, 771, 646, 56, 440, 482, 707, 229, 864, 286, 252],
[103, 568, 68, 904, 882, 239, 67, 112, 941, 457, 397, 412, 634, 1018, 626],
[933, 908, 96, 316, 842, 842, 241, 600, 504, 765, 288, 520, 312, 847, 207],
[969, 255, 492, 868, 927, 951, 170, 607, 720, 234, 478, 482, 119, 376, 10],
[716, 727, 375, 904, 176, 667, 729, 590, 391, 364, 685, 975, 186, 195, 593],
[164, 923, 485, 139, 571, 968, 718, 305, 62, 828, 0, 177, 827, 368, 379],
[416, 151, 83, 822, 640, 414, 969, 128, 667, 297, 129, 907, 938, 142, 547],
[623, 263, 408, 922, 947, 916, 705, 475, 360, 68, 858, 679, 601, 737, 268],
]
]
).to(torch_device),
"dac_44khz": torch.tensor(
[
[
[698, 315, 105, 315, 330, 105, 105, 698, 315, 481, 330, 93, 629, 315, 105],
[30, 232, 249, 881, 962, 365, 56, 881, 186, 402, 311, 521, 558, 778, 254],
[1022, 22, 361, 491, 233, 419, 909, 456, 456, 471, 420, 569, 455, 491, 16],
[599, 143, 641, 352, 40, 556, 860, 780, 138, 137, 304, 563, 863, 174, 370],
[485, 350, 242, 555, 174, 581, 666, 744, 559, 810, 127, 558, 453, 90, 124],
[851, 423, 706, 178, 36, 564, 650, 539, 733, 720, 18, 265, 619, 545, 581],
[755, 891, 628, 674, 724, 764, 420, 51, 566, 315, 178, 881, 461, 111, 675],
[52, 995, 512, 139, 538, 666, 1017, 868, 619, 0, 449, 1005, 982, 106, 139],
[357, 180, 368, 892, 856, 567, 960, 148, 36, 708, 945, 285, 531, 331, 440],
]
]
).to(torch_device),
}
EXPECTED_DEC_OUTPUTS = {
"dac_16khz": torch.tensor([[ 0.0002, 0.0007, 0.0012, 0.0015, 0.0017, 0.0011, 0.0004, -0.0002,
-0.0003, 0.0002, 0.0006, 0.0012, 0.0020, 0.0029, 0.0026, 0.0015,
0.0015, 0.0014, 0.0010, 0.0011, 0.0019, 0.0026, 0.0028, 0.0032,
0.0040, 0.0031, 0.0022, 0.0025, 0.0020, 0.0010, 0.0001, 0.0001,
0.0007, 0.0016, 0.0024, 0.0024, 0.0017, 0.0002, -0.0006, -0.0002,
0.0003, 0.0006, 0.0011, 0.0023, 0.0020, 0.0016, 0.0015, 0.0012,
0.0005, -0.0003]]).to(torch_device),
"dac_24khz": torch.tensor([[ 1.8275e-04, 1.8167e-04, -3.1626e-05, -6.4468e-05, 2.1254e-04,
8.4161e-04, 1.5839e-03, 1.6693e-03, 1.5439e-03, 1.3923e-03,
1.1167e-03, 6.2019e-04, -1.2014e-04, -5.7301e-04, -1.7829e-04,
6.0980e-04, 6.7130e-04, 1.6166e-04, -6.9366e-06, 3.1507e-04,
6.3976e-04, 7.1702e-04, 6.3391e-04, 5.7553e-04, 1.1151e-03,
1.9032e-03, 1.9737e-03, 1.2812e-03, 5.6187e-04, 3.9073e-04,
3.8875e-04, 3.0256e-04, 3.8140e-04, 7.6331e-04, 1.3098e-03,
1.7796e-03, 2.1707e-03, 2.5330e-03, 2.9214e-03, 3.0557e-03,
2.7402e-03, 2.2303e-03, 1.8196e-03, 1.6796e-03, 1.6199e-03,
1.0460e-03, 3.5502e-04, 2.8095e-04, 3.8291e-04, 2.2683e-04]]).to(torch_device),
"dac_44khz": torch.tensor([[ 1.3282e-03, 1.4784e-03, 1.6923e-03, 1.8359e-03, 1.8795e-03,
1.9519e-03, 1.9145e-03, 1.7839e-03, 1.5222e-03, 1.2423e-03,
9.9689e-04, 8.4000e-04, 7.6656e-04, 7.7500e-04, 7.7684e-04,
6.9986e-04, 5.3156e-04, 3.2828e-04, 1.7750e-04, 1.6440e-04,
2.9904e-04, 5.4582e-04, 8.2008e-04, 1.0400e-03, 1.1518e-03,
1.1718e-03, 1.1220e-03, 1.0717e-03, 1.0772e-03, 1.1534e-03,
1.3257e-03, 1.5572e-03, 1.7794e-03, 1.9112e-03, 1.9242e-03,
1.7837e-03, 1.5347e-03, 1.2386e-03, 9.3313e-04, 6.4671e-04,
3.5892e-04, 8.4733e-05, -1.6930e-04, -3.9932e-04, -5.8345e-04,
-6.9382e-04, -7.0792e-04, -5.6856e-04, -2.6751e-04, 1.5914e-04]]).to(torch_device),
}
EXPECTED_QUANT_CODEBOOK_LOSS = {
"dac_16khz": 20.7299,
"dac_24khz": 22.6602,
"dac_44khz": 16.2168,
}
EXPECTED_CODEC_ERROR = {
"dac_16khz": 0.003831653157249093,
"dac_24khz": 0.0025609051808714867,
"dac_44khz": 0.0007433777209371328,
}
# -- test_batch
EXPECTED_PREPROC_SHAPE_BATCH = {
"dac_16khz": torch.tensor([2, 1, 113920]),
"dac_24khz": torch.tensor([2, 1, 170880]),
"dac_44khz": torch.tensor([2, 1, 313856]),
}
EXPECTED_ENC_LOSS_BATCH = {
"dac_16khz": 20.3752,
"dac_24khz": 23.5663,
"dac_44khz": 19.5858,
}
EXPECTED_QUANT_CODES_BATCH = {
"dac_16khz": torch.tensor(
[
[
[490, 664, 726, 166, 55, 379, 367, 664, 661, 726, 592, 301, 130, 198, 129],
[1020, 734, 23, 53, 134, 648, 549, 589, 790, 1000, 449, 271, 1021, 740, 36],
[701, 344, 955, 19, 927, 212, 212, 667, 212, 627, 453, 954, 777, 706, 496],
[526, 805, 444, 474, 870, 920, 394, 823, 814, 1021, 763, 677, 251, 485, 1021],
[721, 134, 280, 439, 287, 77, 175, 902, 973, 412, 739, 953, 130, 75, 543],
[675, 316, 285, 341, 783, 850, 131, 487, 701, 150, 749, 730, 900, 481, 498],
[377, 37, 237, 489, 55, 246, 427, 456, 755, 1011, 712, 631, 695, 576, 804],
[601, 557, 681, 52, 10, 299, 284, 216, 869, 276, 424, 364, 955, 41, 497],
[465, 553, 697, 59, 701, 195, 335, 225, 896, 804, 776, 928, 392, 192, 332],
[807, 306, 977, 801, 77, 172, 760, 747, 445, 38, 731, 31, 924, 724, 835],
[903, 561, 205, 421, 231, 873, 931, 361, 679, 854, 471, 884, 1011, 857, 248],
[490, 993, 122, 787, 178, 307, 141, 468, 652, 786, 879, 885, 226, 343, 501],
],
[
[140, 320, 210, 489, 444, 320, 210, 73, 821, 1004, 388, 686, 405, 563, 407],
[725, 449, 802, 85, 36, 532, 620, 28, 620, 418, 146, 532, 418, 453, 565],
[695, 725, 600, 371, 829, 1008, 911, 927, 181, 707, 306, 337, 254, 577, 289],
[51, 648, 186, 129, 781, 968, 737, 563, 400, 839, 674, 689, 544, 767, 577],
[1007, 234, 145, 966, 734, 748, 68, 272, 473, 973, 414, 586, 618, 6, 909],
[410, 566, 507, 756, 943, 1008, 269, 349, 549, 320, 303, 729, 507, 741, 76],
[172, 102, 548, 714, 225, 173, 149, 423, 307, 527, 844, 102, 747, 76, 586],
[656, 144, 407, 245, 140, 925, 48, 197, 126, 418, 112, 674, 582, 916, 223],
[776, 971, 291, 781, 833, 688, 817, 261, 937, 467, 352, 463, 530, 804, 683],
[1009, 284, 427, 907, 900, 875, 279, 285, 878, 315, 734, 751, 337, 699, 966],
[389, 748, 203, 585, 609, 565, 555, 64, 154, 443, 16, 139, 905, 172, 86],
[884, 34, 477, 1013, 335, 493, 724, 202, 356, 199, 728, 552, 755, 223, 371],
],
]
).to(torch_device),
"dac_24khz": torch.tensor(
[
[
[234, 322, 826, 360, 204, 208, 766, 826, 458, 322, 919, 999, 360, 772, 204],
[117, 201, 229, 497, 9, 663, 1002, 243, 556, 300, 781, 496, 77, 780, 781],
[554, 342, 401, 553, 728, 196, 181, 109, 949, 528, 39, 558, 180, 5, 197],
[112, 408, 186, 933, 543, 829, 724, 1001, 425, 39, 163, 517, 986, 348, 653],
[ 88, 207, 671, 551, 742, 231, 870, 577, 353, 1016, 259, 282, 247, 126, 63],
[924, 59, 799, 739, 771, 568, 280, 673, 639, 1002, 35, 143, 270, 749, 571],
[214, 982, 904, 666, 819, 67, 161, 373, 945, 871, 597, 466, 388, 898, 584],
[696, 357, 188, 969, 213, 162, 376, 35, 638, 657, 731, 991, 625, 833, 801],
[559, 885, 343, 621, 752, 319, 292, 389, 947, 776, 78, 585, 193, 834, 622],
[642, 144, 680, 819, 303, 832, 56, 683, 366, 996, 609, 784, 305, 621, 36],
[517, 766, 69, 768, 219, 126, 945, 798, 568, 554, 115, 245, 31, 384, 167],
[424, 684, 371, 447, 50, 309, 407, 121, 839, 1019, 816, 423, 604, 489, 738],
[274, 490, 578, 353, 517, 283, 927, 432, 464, 608, 927, 32, 240, 852, 326],
[737, 226, 450, 862, 549, 799, 887, 925, 392, 841, 539, 633, 351, 7, 386],
[624, 497, 586, 937, 516, 898, 768, 188, 420, 173, 116, 602, 786, 940, 56],
[430, 927, 322, 885, 367, 175, 691, 337, 21, 796, 317, 826, 109, 604, 54],
[917, 854, 118, 231, 567, 332, 827, 422, 339, 958, 529, 63, 992, 597, 428],
[468, 619, 605, 598, 912, 1012, 365, 60, 538, 915, 22, 675, 460, 667, 255],
[912, 373, 355, 92, 920, 454, 979, 414, 645, 442, 783, 956, 693, 457, 842],
[230, 0, 998, 958, 159, 159, 332, 94, 886, 1, 455, 981, 418, 758, 358],
[132, 843, 1008, 626, 776, 342, 53, 362, 636, 997, 22, 36, 997, 12, 374],
[135, 408, 802, 456, 645, 899, 15, 447, 857, 265, 185, 983, 1018, 282, 607],
[272, 467, 461, 358, 389, 792, 385, 339, 50, 888, 63, 3, 792, 588, 972],
[179, 180, 212, 656, 60, 73, 261, 644, 755, 496, 137, 948, 879, 361, 863],
[739, 588, 948, 452, 297, 1009, 49, 725, 853, 843, 249, 957, 1008, 730, 860],
[174, 125, 519, 975, 686, 404, 321, 668, 38, 138, 424, 457, 98, 736, 1004],
[ 68, 262, 289, 299, 1022, 170, 865, 869, 951, 839, 100, 301, 828, 62, 511],
[509, 693, 235, 208, 668, 777, 284, 832, 376, 203, 784, 101, 344, 587, 736],
[121, 83, 484, 951, 839, 180, 801, 363, 890, 373, 206, 467, 524, 572, 614],
[146, 297, 674, 895, 740, 179, 782, 521, 721, 815, 85, 74, 179, 650, 554],
[708, 166, 203, 1021, 89, 991, 410, 117, 1019, 742, 235, 810, 782, 623, 176],
[358, 999, 360, 260, 278, 582, 921, 314, 242, 667, 21, 463, 335, 566, 897],
],
[
[851, 360, 851, 877, 665, 322, 581, 936, 826, 910, 110, 110, 160, 103, 204],
[325, 260, 722, 260, 549, 20, 508, 455, 221, 631, 846, 658, 457, 124, 496],
[529, 367, 767, 408, 628, 190, 80, 460, 351, 209, 768, 255, 655, 759, 605],
[344, 192, 255, 271, 402, 930, 805, 939, 497, 94, 843, 38, 96, 140, 760],
[415, 65, 953, 337, 599, 358, 520, 477, 602, 539, 443, 703, 124, 110, 92],
[514, 847, 606, 1014, 678, 806, 563, 408, 520, 4, 208, 83, 630, 176, 423],
[768, 741, 546, 353, 968, 371, 527, 447, 21, 746, 343, 100, 286, 708, 781],
[461, 499, 836, 411, 271, 279, 530, 882, 345, 1001, 828, 270, 733, 74, 709],
[539, 706, 278, 343, 235, 754, 346, 272, 52, 987, 151, 74, 757, 408, 623],
[668, 754, 817, 872, 526, 479, 889, 24, 297, 482, 162, 414, 128, 811, 488],
[973, 938, 874, 855, 767, 419, 378, 832, 745, 820, 957, 364, 389, 976, 301],
[162, 174, 830, 67, 749, 433, 428, 405, 63, 632, 391, 750, 518, 452, 743],
[ 5, 694, 393, 322, 563, 425, 306, 211, 870, 302, 491, 694, 324, 142, 997],
[981, 953, 116, 51, 674, 451, 351, 335, 285, 44, 591, 147, 124, 212, 957],
[813, 80, 700, 675, 964, 355, 137, 104, 679, 151, 88, 553, 815, 820, 21],
[398, 102, 563, 720, 304, 299, 1009, 606, 186, 52, 1012, 807, 999, 642, 901],
[405, 522, 668, 526, 657, 762, 624, 636, 358, 570, 572, 169, 580, 567, 939],
[153, 712, 786, 553, 210, 472, 327, 759, 51, 153, 833, 22, 800, 777, 283],
[324, 45, 757, 563, 703, 888, 256, 447, 515, 313, 94, 345, 295, 596, 132],
[792, 242, 242, 225, 229, 1004, 436, 61, 869, 757, 945, 1004, 122, 914, 989],
[595, 902, 56, 961, 722, 731, 937, 332, 706, 30, 372, 479, 1023, 837, 513],
[918, 972, 772, 658, 594, 12, 106, 225, 678, 920, 971, 724, 181, 864, 837],
[672, 237, 87, 36, 344, 866, 260, 473, 915, 203, 385, 23, 561, 754, 71],
[327, 65, 330, 525, 115, 837, 384, 734, 113, 178, 982, 285, 678, 392, 50],
[206, 317, 201, 954, 534, 692, 902, 773, 399, 215, 766, 143, 35, 135, 672],
[483, 984, 864, 843, 478, 811, 931, 656, 561, 636, 638, 326, 141, 140, 632],
[508, 315, 204, 862, 265, 444, 277, 658, 281, 1009, 453, 283, 387, 85, 677],
[586, 992, 528, 525, 90, 288, 15, 370, 939, 894, 791, 819, 879, 279, 222],
[639, 896, 792, 487, 853, 852, 690, 886, 141, 988, 889, 29, 899, 745, 864],
[551, 167, 982, 422, 768, 495, 244, 956, 991, 242, 353, 622, 168, 1019, 735],
[207, 155, 674, 423, 792, 755, 582, 541, 612, 429, 460, 947, 173, 471, 79],
[776, 304, 401, 113, 927, 439, 362, 612, 527, 343, 845, 326, 708, 83, 473],
],
]
).to(torch_device),
"dac_44khz": torch.tensor(
[
[
[330, 315, 315, 619, 481, 315, 197, 315, 315, 105, 481, 315, 481, 481, 481],
[718, 1007, 929, 6, 906, 944, 402, 750, 396, 854, 336, 426, 609, 356, 329],
[417, 266, 697, 456, 300, 941, 325, 923, 1022, 605, 991, 7, 939, 217, 456],
[813, 811, 271, 148, 184, 838, 723, 497, 678, 922, 12, 333, 918, 842, 285],
[832, 307, 635, 794, 334, 828, 32, 505, 610, 170, 161, 907, 193, 372, 585],
[ 91, 941, 912, 1001, 507, 486, 362, 1006, 157, 640, 760, 215, 577, 256, 371],
[676, 27, 903, 472, 473, 881, 860, 477, 514, 385, 533, 911, 701, 102, 825],
[326, 399, 116, 443, 605, 807, 534, 199, 559, 538, 516, 983, 372, 861, 167],
[776, 843, 185, 326, 723, 390, 318, 34, 191, 674, 728, 554, 721, 354, 267],
],
[
[578, 698, 330, 330, 330, 578, 330, 801, 330, 330, 330, 330, 330, 330, 330],
[171, 503, 725, 215, 814, 861, 139, 684, 880, 905, 937, 418, 359, 190, 823],
[141, 482, 780, 489, 845, 499, 59, 480, 296, 30, 631, 540, 399, 23, 385],
[402, 837, 216, 116, 535, 456, 1006, 969, 994, 125, 1011, 285, 851, 832, 197],
[46, 950, 728, 645, 850, 839, 527, 850, 81, 449, 590, 166, 22, 148, 402],
[98, 758, 474, 941, 217, 667, 681, 109, 719, 233, 162, 160, 329, 627, 716],
[999, 228, 752, 639, 404, 333, 993, 177, 888, 158, 644, 221, 1011, 302, 79],
[669, 535, 164, 665, 809, 798, 448, 800, 123, 936, 639, 361, 353, 402, 160],
[345, 355, 940, 261, 71, 946, 750, 120, 565, 692, 813, 976, 946, 50, 516],
],
]
).to(torch_device),
}
EXPECTED_DEC_OUTPUTS_BATCH = {
"dac_16khz": torch.tensor([[-1.9537e-04, 1.9159e-04, 3.1591e-04, 2.0804e-04, -3.1973e-05,
-3.3672e-04, -4.6511e-04, -4.3928e-04, -2.8604e-04, 2.7375e-04,
8.8118e-04, 1.1193e-03, 1.6241e-03, 1.9374e-03, 1.7826e-03,
5.9879e-04, -4.4053e-04, -1.3708e-03, -1.9989e-03, -2.0518e-03,
-1.5591e-03, -4.0491e-04, 6.3700e-04, 1.2456e-03, 1.3381e-03,
1.2848e-03, 6.0356e-04, 9.4392e-05, -6.1609e-04, -1.3806e-03,
-1.4977e-03, -9.7825e-04, -3.8692e-04, 5.3131e-04, 1.8666e-03,
2.3713e-03, 2.1134e-03, 1.4220e-03, 7.3615e-04, -2.5369e-04,
-9.8636e-04, -1.3868e-03, -1.6701e-03, -1.0521e-03, -6.2109e-04,
-5.3288e-04, -2.1532e-04, 4.1671e-04, 7.7438e-04, 8.0039e-04],
[ 6.5413e-05, 3.6614e-04, -1.4457e-03, -2.3634e-04, -3.6627e-04,
-1.3334e-03, 1.0519e-03, -1.4445e-03, 2.1915e-04, -3.3080e-04,
-1.3308e-03, 4.8407e-04, 8.6294e-04, -1.7639e-03, 4.2044e-05,
2.0936e-04, -2.9692e-03, 8.7512e-04, 1.3507e-03, 2.0057e-03,
-5.5121e-04, 1.3708e-03, -3.1085e-05, -2.6315e-03, -6.7661e-04,
6.2430e-04, 8.3580e-04, -1.5940e-03, 3.3061e-03, 1.3702e-03,
-1.7913e-03, -4.0576e-05, -5.5106e-04, -9.3050e-04, -2.3780e-03,
-5.3527e-04, 1.5840e-03, -1.4020e-03, 1.2090e-03, 6.0580e-04,
-1.8049e-03, 3.5135e-05, -3.0823e-03, 5.0042e-04, -1.1099e-03,
1.1512e-04, 3.3324e-03, -1.7616e-03, 5.2421e-04, -1.3589e-03]]).to(torch_device),
"dac_24khz": torch.tensor([[ 2.5545e-04, 8.9353e-05, -4.1158e-04, -6.1750e-04, -5.9480e-04,
-5.6071e-04, -5.2090e-04, -4.2821e-04, -1.4335e-04, -6.9339e-05,
-9.0480e-05, 6.5549e-05, 7.5300e-05, 1.9337e-07, 2.0931e-04,
4.1511e-04, 1.1008e-04, -1.6662e-04, 4.9021e-05, 4.0946e-04,
4.3870e-04, 3.9847e-04, 4.1346e-04, 2.3158e-04, 2.4527e-04,
4.4284e-04, 3.8170e-04, 1.2579e-04, -4.0307e-05, -2.8757e-04,
-8.5801e-04, -1.4023e-03, -1.5856e-03, -1.5326e-03, -1.5314e-03,
-1.4345e-03, -1.0435e-03, -5.2566e-04, 2.8071e-05, 5.4406e-04,
8.9030e-04, 1.0047e-03, 1.0342e-03, 9.4115e-04, 6.8876e-04,
3.2003e-04, -7.9418e-05, -4.0320e-04, -5.7941e-04, -7.3025e-04],
[-4.7845e-04, 3.8872e-04, 4.0155e-04, 3.6504e-04, 1.5022e-03,
1.2856e-03, -1.8015e-04, -7.2616e-05, 6.3906e-04, -1.1491e-03,
-2.7369e-03, -1.5336e-03, -8.2313e-04, -1.6791e-03, -9.4759e-06,
2.3807e-03, -2.2854e-04, -2.9693e-03, 2.9812e-04, 2.7258e-03,
-3.8019e-04, -2.2031e-03, -3.6195e-04, -6.6059e-04, -2.0270e-03,
-9.9469e-05, 5.4256e-04, -3.3896e-03, -3.9328e-03, 5.6228e-04,
1.1226e-03, -1.0931e-03, 1.0939e-03, 2.9646e-03, -4.1916e-04,
-1.8292e-03, 1.0766e-03, 2.3094e-04, -3.4554e-03, -2.0085e-03,
5.9608e-04, -1.3147e-03, -1.3603e-03, 1.8352e-03, 4.6342e-04,
-2.6805e-03, -1.3435e-05, 2.8397e-03, 1.0937e-04, -1.7540e-03]]).to(torch_device),
"dac_44khz": torch.tensor([[-4.8139e-04, -2.2367e-04, 3.1570e-06, 1.6349e-04, 2.6632e-04,
3.9803e-04, 5.3275e-04, 7.0730e-04, 8.0937e-04, 9.2120e-04,
1.0271e-03, 1.0728e-03, 1.0603e-03, 1.0328e-03, 9.8452e-04,
8.4670e-04, 6.5249e-04, 4.2936e-04, 1.9743e-04, -4.4033e-06,
-1.5679e-04, -2.3475e-04, -2.6826e-04, -2.6645e-04, -2.9844e-04,
-3.6448e-04, -4.6388e-04, -5.5712e-04, -6.4478e-04, -7.0090e-04,
-7.1978e-04, -6.8389e-04, -6.1487e-04, -4.9192e-04, -3.1528e-04,
-1.3920e-04, 1.6591e-05, 1.4938e-04, 2.6723e-04, 4.0855e-04,
6.0641e-04, 8.1632e-04, 9.6742e-04, 1.0481e-03, 1.0581e-03,
1.0213e-03, 9.3807e-04, 8.1994e-04, 6.9299e-04, 5.8774e-04],
[ 7.2770e-04, 8.2807e-04, 3.7124e-04, -4.1002e-04, -8.7899e-04,
-6.0642e-04, 2.0435e-04, 1.0668e-03, 1.3318e-03, 7.8307e-04,
-3.2117e-04, -1.3448e-03, -1.6520e-03, -1.0778e-03, 2.4146e-05,
9.8221e-04, 1.2399e-03, 7.6147e-04, -2.2230e-05, -4.7415e-04,
-1.4114e-04, 8.9560e-04, 1.9897e-03, 2.4969e-03, 2.0585e-03,
1.0263e-03, 1.5015e-04, 9.2623e-05, 7.8239e-04, 1.3270e-03,
7.3531e-04, -1.1100e-03, -3.1865e-03, -3.9610e-03, -2.6410e-03,
-6.5765e-06, 1.9960e-03, 1.7654e-03, -5.9006e-04, -3.2932e-03,
-4.2902e-03, -2.8423e-03, -6.7126e-05, 2.0438e-03, 2.2075e-03,
8.8849e-04, -3.6330e-04, -3.9405e-04, 6.1344e-04, 1.4316e-03]]).to(torch_device),
}
EXPECTED_QUANT_CODEBOOK_LOSS_BATCH = {
"dac_16khz": 20.6472,
"dac_24khz": 23.5954,
"dac_44khz": 16.1380,
}
EXPECTED_CODEC_ERROR_BATCH = {
"dac_16khz": 0.0019726448226720095,
"dac_24khz": 0.0013017073506489396,
"dac_44khz": 0.0003825263702310622,
}
# fmt: on
@slow
@require_torch
class DacIntegrationTest(unittest.TestCase):
@parameterized.expand([(model_name,) for model_name in EXPECTED_PREPROC_SHAPE.keys()])
def test_integration(self, model_name):
# load model and processor
model_id = f"descript/{model_name}"
model = DacModel.from_pretrained(model_id, force_download=True).to(torch_device).eval()
processor = AutoProcessor.from_pretrained(model_id)
# load audio sample
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
audio_sample = librispeech_dummy[0]["audio"]["array"]
# check on processor audio shape
inputs = processor(
raw_audio=audio_sample,
sampling_rate=processor.sampling_rate,
return_tensors="pt",
).to(torch_device)
        self.assertTrue(torch.equal(torch.tensor(inputs["input_values"].shape), EXPECTED_PREPROC_SHAPE[model_name]))
with torch.no_grad():
# compare encoder loss
encoder_outputs = model.encode(inputs["input_values"])
torch.testing.assert_close(
encoder_outputs[0].squeeze().item(), EXPECTED_ENC_LOSS[model_name], rtol=1e-3, atol=1e-3
)
# compare quantizer outputs
quantizer_outputs = model.quantizer(encoder_outputs[1])
torch.testing.assert_close(
quantizer_outputs[1][..., : EXPECTED_QUANT_CODES[model_name].shape[-1]],
EXPECTED_QUANT_CODES[model_name],
rtol=1e-6,
atol=1e-6,
)
torch.testing.assert_close(
quantizer_outputs[4].squeeze().item(), EXPECTED_QUANT_CODEBOOK_LOSS[model_name], rtol=1e-4, atol=1e-4
)
# compare decoder outputs
decoded_outputs = model.decode(encoder_outputs[1])
torch.testing.assert_close(
decoded_outputs["audio_values"][..., : EXPECTED_DEC_OUTPUTS[model_name].shape[-1]],
EXPECTED_DEC_OUTPUTS[model_name],
rtol=1e-3,
atol=1e-3,
)
# compare codec error / lossiness
codec_err = compute_rmse(decoded_outputs["audio_values"], inputs["input_values"])
torch.testing.assert_close(codec_err, EXPECTED_CODEC_ERROR[model_name], rtol=1e-5, atol=1e-5)
# make sure forward and decode gives same result
enc_dec = model(inputs["input_values"])[1]
torch.testing.assert_close(decoded_outputs["audio_values"], enc_dec, rtol=1e-6, atol=1e-6)
@parameterized.expand([(model_name,) for model_name in EXPECTED_PREPROC_SHAPE_BATCH.keys()])
def test_integration_batch(self, model_name):
# load model and processor
model_id = f"descript/{model_name}"
model = DacModel.from_pretrained(model_id).to(torch_device)
processor = AutoProcessor.from_pretrained(model_id)
# load audio samples
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
audio_samples = [np.array([audio_sample["array"]])[0] for audio_sample in librispeech_dummy[-2:]["audio"]]
# check on processor audio shape
inputs = processor(
raw_audio=audio_samples,
sampling_rate=processor.sampling_rate,
truncation=False,
return_tensors="pt",
).to(torch_device)
        self.assertTrue(torch.equal(torch.tensor(inputs["input_values"].shape), EXPECTED_PREPROC_SHAPE_BATCH[model_name]))
with torch.no_grad():
# compare encoder loss
encoder_outputs = model.encode(inputs["input_values"])
torch.testing.assert_close(
encoder_outputs[0].mean().item(), EXPECTED_ENC_LOSS_BATCH[model_name], rtol=1e-3, atol=1e-3
)
# compare quantizer outputs
quantizer_outputs = model.quantizer(encoder_outputs[1])
torch.testing.assert_close(
quantizer_outputs[1][..., : EXPECTED_QUANT_CODES_BATCH[model_name].shape[-1]],
EXPECTED_QUANT_CODES_BATCH[model_name],
rtol=1e-6,
atol=1e-6,
)
torch.testing.assert_close(
quantizer_outputs[4].mean().item(),
EXPECTED_QUANT_CODEBOOK_LOSS_BATCH[model_name],
rtol=1e-4,
atol=1e-4,
)
# compare decoder outputs
decoded_outputs = model.decode(encoder_outputs[1])
torch.testing.assert_close(
EXPECTED_DEC_OUTPUTS_BATCH[model_name],
decoded_outputs["audio_values"][..., : EXPECTED_DEC_OUTPUTS_BATCH[model_name].shape[-1]],
rtol=1e-3,
atol=1e-3,
)
# compare codec error / lossiness
codec_err = compute_rmse(decoded_outputs["audio_values"], inputs["input_values"])
torch.testing.assert_close(codec_err, EXPECTED_CODEC_ERROR_BATCH[model_name], rtol=1e-6, atol=1e-6)
# make sure forward and decode gives same result
enc_dec = model(inputs["input_values"])[1]
torch.testing.assert_close(decoded_outputs["audio_values"], enc_dec, rtol=1e-6, atol=1e-6)
| transformers/tests/models/dac/test_modeling_dac.py/0 | {
"file_path": "transformers/tests/models/dac/test_modeling_dac.py",
"repo_id": "transformers",
"token_count": 25227
} | 570 |
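The DAC integration tests above quantify codec lossiness by calling a `compute_rmse` helper on the decoded waveform and the original input. That helper is not defined in this excerpt; the snippet below is a minimal sketch of how such an RMSE metric could be computed, assuming both tensors are waveforms whose last dimension is time (the real helper in the test module may trim, normalize, or reduce differently).

import torch

def compute_rmse_sketch(reconstructed: torch.Tensor, original: torch.Tensor) -> float:
    """Root-mean-square error between a decoded waveform and its reference.
    Hypothetical illustration only; not the exact helper used by the DAC tests."""
    # Trim to the shorter length so the element-wise difference is well defined.
    length = min(reconstructed.shape[-1], original.shape[-1])
    diff = reconstructed[..., :length] - original[..., :length]
    return torch.sqrt(torch.mean(diff**2)).item()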
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DeepSeekV2 model."""
import unittest
import pytest
from transformers import BitsAndBytesConfig, Cache, DeepseekV2Config, is_torch_available
from transformers.testing_utils import require_read_token, require_torch, require_torch_accelerator, slow, torch_device
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import AutoTokenizer, DeepseekV2ForCausalLM, DeepseekV2ForSequenceClassification, DeepseekV2Model
from transformers.models.deepseek_v2.modeling_deepseek_v2 import DeepseekV2RotaryEmbedding
class DeepseekV2ModelTester(CausalLMModelTester):
if is_torch_available():
config_class = DeepseekV2Config
base_model_class = DeepseekV2Model
causal_lm_class = DeepseekV2ForCausalLM
sequence_class = DeepseekV2ForSequenceClassification
def __init__(
self,
parent,
n_routed_experts=8,
kv_lora_rank=32,
q_lora_rank=16,
qk_nope_head_dim=64,
qk_rope_head_dim=64,
):
super().__init__(parent=parent)
self.n_routed_experts = n_routed_experts
self.kv_lora_rank = kv_lora_rank
self.q_lora_rank = q_lora_rank
self.qk_nope_head_dim = qk_nope_head_dim
self.qk_rope_head_dim = qk_rope_head_dim
@require_torch
class DeepseekV2ModelTest(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(
DeepseekV2ForCausalLM,
DeepseekV2ForSequenceClassification,
DeepseekV2Model,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": DeepseekV2Model,
"text-classification": DeepseekV2ForSequenceClassification,
"text-generation": DeepseekV2ForCausalLM,
"zero-shot": DeepseekV2ForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
fx_compatible = False
test_torchscript = False
model_tester_class = DeepseekV2ModelTester
rotary_embedding_layer = DeepseekV2RotaryEmbedding
model_split_percents = [0.5, 0.7, 0.8]
# used in `test_torch_compile_for_training`
_torch_compile_train_cls = DeepseekV2ForCausalLM if is_torch_available() else None
def test_model_rope_scaling(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
scaling_factor = 10
short_input_length = 10
long_input_length = int(config.max_position_embeddings * 1.5)
# Inputs
        x = torch.randn(1, dtype=torch.float32, device=torch_device)  # used exclusively to get the dtype and the device
position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
position_ids_short = position_ids_short.unsqueeze(0)
position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
position_ids_long = position_ids_long.unsqueeze(0)
# Sanity check original RoPE
original_rope = DeepseekV2RotaryEmbedding(config=config).to(torch_device)
original_freqs_cis_short = original_rope(x, position_ids_short)
original_freqs_cis_long = original_rope(x, position_ids_long)
torch.testing.assert_close(original_freqs_cis_short, original_freqs_cis_long[:, :short_input_length, :])
# Sanity check linear RoPE scaling
# New position "x" should match original position with index "x/scaling_factor"
config.rope_scaling = {"type": "linear", "factor": scaling_factor}
linear_scaling_rope = DeepseekV2RotaryEmbedding(config=config).to(torch_device)
linear_freqs_cis_short = linear_scaling_rope(x, position_ids_short)
linear_freqs_cis_long = linear_scaling_rope(x, position_ids_long)
torch.testing.assert_close(linear_freqs_cis_short, linear_freqs_cis_long[:, :short_input_length, :])
# Sanity check Dynamic NTK RoPE scaling
# Scaling should only be observed after a long input is fed. We can observe that the frequencies increase
# with scaling_factor (or that `inv_freq` decreases)
config.rope_scaling = {"type": "dynamic", "factor": scaling_factor}
ntk_scaling_rope = DeepseekV2RotaryEmbedding(config=config).to(torch_device)
ntk_freqs_cis_short = ntk_scaling_rope(x, position_ids_short)
ntk_freqs_cis_long = ntk_scaling_rope(x, position_ids_long)
torch.testing.assert_close(ntk_freqs_cis_short, original_freqs_cis_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(ntk_freqs_cis_long, original_freqs_cis_long)
self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all())
# Sanity check Yarn RoPE scaling
# Scaling should be over the entire input
config.rope_scaling = {"type": "yarn", "factor": scaling_factor}
yarn_scaling_rope = DeepseekV2RotaryEmbedding(config=config).to(torch_device)
yarn_freqs_cis_short = yarn_scaling_rope(x, position_ids_short)
yarn_freqs_cis_long = yarn_scaling_rope(x, position_ids_long)
torch.testing.assert_close(yarn_freqs_cis_short, yarn_freqs_cis_long[:, :short_input_length, :])
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_freqs_cis_short, original_freqs_cis_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_freqs_cis_long, original_freqs_cis_long)
def test_past_key_values_format(self):
"""
        Overwriting to pass the expected cache shapes (DeepSeek-V2 uses MLA, so the cache shapes are non-standard)
"""
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
batch_size, seq_length = inputs["input_ids"].shape
# difference: last dim
k_embed_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
v_embed_dim = config.v_head_dim
self_attention_key_cache_shape = (batch_size, config.num_key_value_heads, seq_length, k_embed_dim)
self_attention_value_cache_shape = (batch_size, config.num_key_value_heads, seq_length, v_embed_dim)
# build the full cache shapes
num_hidden_layers = config.num_hidden_layers
all_cache_shapes = [
[self_attention_key_cache_shape, self_attention_value_cache_shape] for _ in range(num_hidden_layers)
]
super().test_past_key_values_format(custom_all_cache_shapes=all_cache_shapes)
def _check_past_key_values_for_generate(self, batch_size, decoder_past_key_values, cache_length, config):
"""Needs to be overriden as deepseek has special MLA cache format (though we don't really use the MLA)"""
self.assertIsInstance(decoder_past_key_values, Cache)
# (batch, head, seq_length, head_features)
expected_common_shape = (
batch_size,
config.num_key_value_heads if hasattr(config, "num_key_value_heads") else config.num_attention_heads,
cache_length,
)
expected_key_shape = expected_common_shape + (config.qk_nope_head_dim + config.qk_rope_head_dim,)
expected_value_shape = expected_common_shape + (config.v_head_dim,)
if isinstance(decoder_past_key_values, Cache):
for layer in decoder_past_key_values.layers:
self.assertEqual(layer.keys.shape, expected_key_shape)
self.assertEqual(layer.values.shape, expected_value_shape)
@unittest.skip("Dynamic control flow in MoE")
@pytest.mark.torch_compile_test
def test_torch_compile_for_training(self):
pass
@slow
@require_read_token
@require_torch_accelerator
class DeepseekV2IntegrationTest(unittest.TestCase):
def test_deepseek_v2_lite(self):
EXPECTED_TEXT = ['An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors.\n\nAttention functions are used in a variety of applications, including natural language processing, computer vision, and reinforcement learning.\n\nThe attention function is a function that takes a query and a set of key-value pairs as input and outputs a vector'] # fmt: skip
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V2-Lite")
model = DeepseekV2ForCausalLM.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
device_map=torch_device,
dtype=torch.bfloat16,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
input_text = [
"An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors." # fmt: skip
]
model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
generated_ids = model.generate(**model_inputs, max_new_tokens=50, do_sample=False)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_TEXT)
def test_logits_eager(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = DeepseekV2ForCausalLM.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
device_map=torch_device,
dtype=torch.bfloat16,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
attn_implementation="eager",
)
with torch.no_grad():
out = model(torch.tensor([input_ids]).to(torch_device))
EXPECTED_MEAN = torch.tensor([[-6.1232, -5.0952, -4.4493, -2.6536, -2.0608, -2.3991, -3.8013, -2.8681]], device=torch_device) # fmt: skip
torch.testing.assert_close(out.logits.float().mean(-1), EXPECTED_MEAN, atol=1e-3, rtol=1e-3)
EXPECTED_SLICE = torch.tensor([-1.2500, -0.9961, -0.0194, -3.1562, 1.2812, -2.7656, -0.8438, -3.0469, -2.7812, -0.6328, -0.4160, -1.9688, -2.4219, -1.0391, -3.8906], device=torch_device) # fmt: skip
torch.testing.assert_close(out.logits[0, 0, :15].float(), EXPECTED_SLICE, atol=1e-3, rtol=1e-3)
def test_batch_fa2(self):
EXPECTED_TEXT = [
"Simply put, the theory of relativity states that \nthe laws of physics are the same for all observers, regardless of their \nrelative motion.\nThe theory of relativity is a theory of space, time, and gravity.\nThe theory of", # fmt: skip
"My favorite all time favorite condiment is ketchup. I love ketchup. I love ketchup on my hot dogs, hamburgers, french fries, and even on my eggs. I love ketchup. I love ketchup so much that I", # fmt: skip
]
prompts = [
"Simply put, the theory of relativity states that ",
"My favorite all time favorite condiment is ketchup.",
]
tokenizer = AutoTokenizer.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite", pad_token="</s>", padding_side="right"
)
model = DeepseekV2ForCausalLM.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
device_map=torch_device,
dtype=torch.bfloat16,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, generated_text)
| transformers/tests/models/deepseek_v2/test_modeling_deepseek_v2.py/0 | {
"file_path": "transformers/tests/models/deepseek_v2/test_modeling_deepseek_v2.py",
"repo_id": "transformers",
"token_count": 5188
} | 571 |
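The DeepseekV2 tests above derive non-standard MLA cache shapes from the config's head dimensions: keys concatenate the "nope" and "rope" parts while values use a separate head dimension. As a quick illustration of that arithmetic (with arbitrary placeholder numbers, not values from any released checkpoint):

# Sketch of the MLA cache-shape arithmetic used in test_past_key_values_format.
batch_size, num_key_value_heads, seq_length = 2, 4, 7
qk_nope_head_dim, qk_rope_head_dim, v_head_dim = 64, 64, 32

k_embed_dim = qk_nope_head_dim + qk_rope_head_dim  # keys carry both the nope and rope components
key_cache_shape = (batch_size, num_key_value_heads, seq_length, k_embed_dim)
value_cache_shape = (batch_size, num_key_value_heads, seq_length, v_head_dim)

assert key_cache_shape == (2, 4, 7, 128)
assert value_cache_shape == (2, 4, 7, 32)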
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DeiT model."""
import unittest
import warnings
from transformers import DeiTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_accelerator,
require_torch_fp16,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.auto.modeling_auto import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
MODEL_MAPPING_NAMES,
)
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
encoder_stride=2,
mask_ratio=0.5,
attn_implementation="eager",
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.encoder_stride = encoder_stride
self.attn_implementation = attn_implementation
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 2
self.mask_ratio = mask_ratio
self.num_masks = int(mask_ratio * self.seq_length)
self.mask_length = num_patches
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return DeiTConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
encoder_stride=self.encoder_stride,
attn_implementation=self.attn_implementation,
)
def create_and_check_model(self, config, pixel_values, labels):
model = DeiTModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
model = DeiTForMaskedImageModeling(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
)
# test greyscale images
config.num_channels = 1
model = DeiTForMaskedImageModeling(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
result = model(pixel_values)
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.type_sequence_label_size
model = DeiTForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
config.num_channels = 1
model = DeiTForImageClassification(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_torch_exportable = True
def setUp(self):
self.model_tester = DeiTModelTester(self)
self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
@unittest.skip(
"Since `torch==2.3+cu121`, although this test passes, many subsequent tests have `CUDA error: misaligned address`."
"If `nvidia-xxx-cu118` are also installed, no failure (even with `torch==2.3+cu121`)."
)
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_image_modeling(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
# special case for DeiTForImageClassificationWithTeacher model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def test_training(self):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class.__name__ in MODEL_MAPPING_NAMES.values()
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
config.use_cache = False
config.return_dict = True
for model_class in self.all_model_classes:
if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
model = model_class(config)
model.gradient_checkpointing_enable()
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_problem_types(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
problem_types = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class.__name__
not in [
*MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(),
*MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
config.problem_type = problem_type["title"]
config.num_labels = problem_type["num_labels"]
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
if problem_type["num_labels"] > 1:
inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=True) as warning_list:
loss = model(**inputs).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}"
)
loss.backward()
@slow
def test_model_from_pretrained(self):
model_name = "facebook/deit-base-distilled-patch16-224"
model = DeiTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head(self):
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
torch_device
)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
torch_device
)
image_processor = self.default_image_processor
# image size is {"height": 480, "width": 640}
image = prepare_img()
image_processor.size = {"height": 480, "width": 640}
# center crop set to False so image is not center cropped to 224x224
inputs = image_processor(images=image, return_tensors="pt", do_center_crop=False).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
@slow
@require_accelerate
@require_torch_accelerator
@require_torch_fp16
def test_inference_fp16(self):
r"""
        A small test to make sure that inference works in half precision without any problem.
"""
model = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224", dtype=torch.float16, device_map="auto"
)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_ = model(pixel_values)
| transformers/tests/models/deit/test_modeling_deit.py/0 | {
"file_path": "transformers/tests/models/deit/test_modeling_deit.py",
"repo_id": "transformers",
"token_count": 7901
} | 572 |
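The DeiT tester above notes that the sequence length is the number of patches plus two extra tokens ([CLS] and distillation). A small worked example of that relationship, using the tester's default image and patch sizes:

# Sequence-length arithmetic behind DeiTModelTester (defaults: image_size=30, patch_size=2).
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225 patches
seq_length = num_patches + 2                   # plus [CLS] and distillation tokens

assert num_patches == 225
assert seq_length == 227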
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import shutil
import tempfile
import unittest
from transformers import (
AutoProcessor,
EvollaProcessor,
)
from transformers.testing_utils import require_torch
from transformers.utils import is_torch_available
from ...test_processing_common import ProcessorTesterMixin
if is_torch_available():
import torch
EVOLLA_VALID_AA = list("ACDEFGHIKLMNPQRSTVWY#")
EVOLLA_VALID_FS = list("pynwrqhgdlvtmfsaeikc#")
@require_torch
class EvollaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = EvollaProcessor
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
processor = EvollaProcessor.from_pretrained("westlake-repl/Evolla-10B-hf")
processor.save_pretrained(self.tmpdirname)
self.input_keys = ["protein_input_ids", "protein_attention_mask", "input_ids", "attention_mask"]
def prepare_input_and_expected_output(self):
amino_acid_sequence = "AAAA"
foldseek_sequence = "dddd"
question = "What is the function of this protein?"
expected_output = {
"protein_input_ids": torch.tensor([[0, 13, 13, 13, 13, 2]]),
"protein_attention_mask": torch.tensor([[1, 1, 1, 1, 1, 1]]),
"input_ids": torch.tensor(
[
[
128000,
128006,
9125,
128007,
271,
2675,
527,
459,
15592,
6335,
430,
649,
4320,
904,
4860,
922,
13128,
13,
128009,
128006,
882,
128007,
271,
3923,
374,
279,
734,
315,
420,
13128,
30,
128009,
128006,
78191,
128007,
271,
]
]
),
"attention_mask": torch.tensor(
[
[
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
]
]
),
}
protein_dict = {"aa_seq": amino_acid_sequence, "foldseek": foldseek_sequence}
message = [
{"role": "system", "content": "You are an AI expert that can answer any questions about protein."},
{"role": "user", "content": question},
]
return protein_dict, message, expected_output
def test_processor(self):
protein_tokenizer = self.get_protein_tokenizer()
tokenizer = self.get_tokenizer()
processor = EvollaProcessor(protein_tokenizer, tokenizer)
protein_dict, message, expected_output = self.prepare_input_and_expected_output()
inputs = processor(proteins=[protein_dict], messages_list=[message])
# check if the input is correct
for key, value in expected_output.items():
self.assertTrue(
torch.equal(inputs[key], value),
f"inputs[key] is {inputs[key]} and expected_output[key] is {value}",
)
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_protein_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).protein_tokenizer
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_inputs_single(self):
proteins = {
"aa_seq": "".join(random.choices(EVOLLA_VALID_AA, k=100)),
"foldseek": "".join(random.choices(EVOLLA_VALID_FS, k=100)),
}
return proteins
def prepare_inputs_pair(self):
proteins = [
{
"aa_seq": "".join(random.choices(EVOLLA_VALID_AA, k=100)),
"foldseek": "".join(random.choices(EVOLLA_VALID_FS, k=100)),
},
{
"aa_seq": "".join(random.choices(EVOLLA_VALID_AA, k=100)),
"foldseek": "".join(random.choices(EVOLLA_VALID_FS, k=100)),
},
]
return proteins
def prepare_inputs_long(self):
proteins = [
{
"aa_seq": "".join(random.choices(EVOLLA_VALID_AA, k=100)),
"foldseek": "".join(random.choices(EVOLLA_VALID_FS, k=100)),
},
{
"aa_seq": "".join(random.choices(EVOLLA_VALID_AA, k=2000)),
"foldseek": "".join(random.choices(EVOLLA_VALID_FS, k=2000)),
},
]
return proteins
def prepare_inputs_short(self):
proteins = [
{
"aa_seq": "".join(random.choices(EVOLLA_VALID_AA, k=1)),
"foldseek": "".join(random.choices(EVOLLA_VALID_FS, k=1)),
},
{
"aa_seq": "".join(random.choices(EVOLLA_VALID_AA, k=100)),
"foldseek": "".join(random.choices(EVOLLA_VALID_FS, k=100)),
},
]
return proteins
def prepare_inputs_empty(self):
proteins = [
{
"aa_seq": "",
"foldseek": "",
},
{
"aa_seq": "".join(random.choices(EVOLLA_VALID_AA, k=100)),
"foldseek": "".join(random.choices(EVOLLA_VALID_FS, k=100)),
},
]
return proteins
def prepare_inputs(self, protein_types="pair"):
r"""
Prepare inputs for the test.
Args:
protein_types (`str`): the types of proteins to prepare.
- "single": a single correct protein.
- "pair": a pair of correct proteins.
- "long": a long sequence of correct proteins and a correct protein.
- "short": a short sequence of correct proteins (only have 1 aa) and a correct protein.
- "empty": an empty sequence of proteins and a correct protein.
"""
if protein_types == "single":
proteins = self.prepare_inputs_single()
elif protein_types == "pair":
proteins = self.prepare_inputs_pair()
elif protein_types == "long":
proteins = self.prepare_inputs_long()
elif protein_types == "short":
proteins = self.prepare_inputs_short()
elif protein_types == "empty":
proteins = self.prepare_inputs_empty()
else:
raise ValueError(
f"protein_types should be one of 'single', 'pair', 'long','short', 'empty', but got {protein_types}"
)
questions = ["What is the function of the protein?"] * len(proteins)
messages_list = []
for question in questions:
messages = [
{"role": "system", "content": "You are an AI expert that can answer any questions about protein."},
{"role": "user", "content": question},
]
messages_list.append(messages)
return proteins, messages_list
def test_tokenizer_decode(self):
protein_tokenizer = self.get_protein_tokenizer()
tokenizer = self.get_tokenizer()
processor = EvollaProcessor(tokenizer=tokenizer, protein_tokenizer=protein_tokenizer, return_tensors="pt")
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
protein_tokenizer = self.get_protein_tokenizer()
tokenizer = self.get_tokenizer()
processor = EvollaProcessor(tokenizer=tokenizer, protein_tokenizer=protein_tokenizer)
proteins, messages_list = self.prepare_inputs()
inputs = processor(messages_list=messages_list, proteins=proteins, padding="longest", return_tensors="pt")
        # For now the processor returns only ['protein_input_ids', 'protein_attention_mask', 'input_ids', 'attention_mask']
self.assertSetEqual(set(inputs.keys()), set(self.input_keys))
| transformers/tests/models/evolla/test_processing_evolla.py/0 | {
"file_path": "transformers/tests/models/evolla/test_processing_evolla.py",
"repo_id": "transformers",
"token_count": 5593
} | 573 |
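The Evolla processor tests above pair each protein dict (an amino-acid string and a FoldSeek string) with a chat-style message list. Below is a minimal, hedged sketch of preparing one such input outside the test harness; the checkpoint name is taken from the test, and the commented call mirrors the processor signature used there rather than prescribing real-world usage.

# Sketch only: mirrors the input structure exercised by EvollaProcessorTest.
protein = {"aa_seq": "AAAA", "foldseek": "dddd"}
messages = [
    {"role": "system", "content": "You are an AI expert that can answer any questions about protein."},
    {"role": "user", "content": "What is the function of this protein?"},
]
# With a loaded processor (e.g. EvollaProcessor.from_pretrained("westlake-repl/Evolla-10B-hf")),
# the following call would return protein_input_ids / protein_attention_mask plus the text tensors:
# inputs = processor(proteins=[protein], messages_list=[messages], return_tensors="pt")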
# Copyright 2022 Meta Platforms authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import requests
from PIL import Image
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
import PIL
from transformers import FlavaImageProcessor
if is_torchvision_available():
from transformers import FlavaImageProcessorFast
from transformers.image_utils import PILImageResampling
from transformers.models.flava.image_processing_flava import (
FLAVA_CODEBOOK_MEAN,
FLAVA_CODEBOOK_STD,
FLAVA_IMAGE_MEAN,
FLAVA_IMAGE_STD,
)
else:
FLAVA_IMAGE_MEAN = FLAVA_IMAGE_STD = FLAVA_CODEBOOK_MEAN = FLAVA_CODEBOOK_STD = None
class FlavaImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
resample=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=FLAVA_IMAGE_MEAN,
image_std=FLAVA_IMAGE_STD,
input_size_patches=14,
total_mask_patches=75,
mask_group_max_patches=None,
mask_group_min_patches=16,
mask_group_min_aspect_ratio=0.3,
mask_group_max_aspect_ratio=None,
codebook_do_resize=True,
codebook_size=None,
codebook_resample=None,
codebook_do_center_crop=True,
codebook_crop_size=None,
codebook_do_map_pixels=True,
codebook_do_normalize=True,
codebook_image_mean=FLAVA_CODEBOOK_MEAN,
codebook_image_std=FLAVA_CODEBOOK_STD,
):
size = size if size is not None else {"height": 224, "width": 224}
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.do_resize = do_resize
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.size = size
self.resample = resample if resample is not None else PILImageResampling.BICUBIC
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.input_size_patches = input_size_patches
self.total_mask_patches = total_mask_patches
self.mask_group_max_patches = mask_group_max_patches
self.mask_group_min_patches = mask_group_min_patches
self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio
self.codebook_do_resize = codebook_do_resize
self.codebook_size = codebook_size
# LANCZOS resample does not support torch Tensor. Use BICUBIC as closest alternative
self.codebook_resample = codebook_resample if codebook_resample is not None else PILImageResampling.BICUBIC
self.codebook_do_center_crop = codebook_do_center_crop
self.codebook_crop_size = codebook_crop_size
self.codebook_do_map_pixels = codebook_do_map_pixels
self.codebook_do_normalize = codebook_do_normalize
self.codebook_image_mean = codebook_image_mean
self.codebook_image_std = codebook_image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"resample": self.resample,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"input_size_patches": self.input_size_patches,
"total_mask_patches": self.total_mask_patches,
"mask_group_max_patches": self.mask_group_max_patches,
"mask_group_min_patches": self.mask_group_min_patches,
"mask_group_min_aspect_ratio": self.mask_group_min_aspect_ratio,
"mask_group_max_aspect_ratio": self.mask_group_min_aspect_ratio,
"codebook_do_resize": self.codebook_do_resize,
"codebook_size": self.codebook_size,
"codebook_resample": self.codebook_resample,
"codebook_do_center_crop": self.codebook_do_center_crop,
"codebook_crop_size": self.codebook_crop_size,
"codebook_do_map_pixels": self.codebook_do_map_pixels,
"codebook_do_normalize": self.codebook_do_normalize,
"codebook_image_mean": self.codebook_image_mean,
"codebook_image_std": self.codebook_image_std,
}
def get_expected_image_size(self):
return (self.size["height"], self.size["width"])
def get_expected_mask_size(self):
return (
(self.input_size_patches, self.input_size_patches)
if not isinstance(self.input_size_patches, tuple)
else self.input_size_patches
)
def get_expected_codebook_image_size(self):
return (self.codebook_size["height"], self.codebook_size["width"])
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class FlavaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = FlavaImageProcessor if is_vision_available() else None
fast_image_processing_class = FlavaImageProcessorFast if is_torchvision_available() else None
maxDiff = None
def setUp(self):
super().setUp()
self.image_processor_tester = FlavaImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "resample"))
self.assertTrue(hasattr(image_processing, "crop_size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "masking_generator"))
self.assertTrue(hasattr(image_processing, "codebook_do_resize"))
self.assertTrue(hasattr(image_processing, "codebook_size"))
self.assertTrue(hasattr(image_processing, "codebook_resample"))
self.assertTrue(hasattr(image_processing, "codebook_do_center_crop"))
self.assertTrue(hasattr(image_processing, "codebook_crop_size"))
self.assertTrue(hasattr(image_processing, "codebook_do_map_pixels"))
self.assertTrue(hasattr(image_processing, "codebook_do_normalize"))
self.assertTrue(hasattr(image_processing, "codebook_image_mean"))
self.assertTrue(hasattr(image_processing, "codebook_image_std"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 224, "width": 224})
self.assertEqual(image_processor.crop_size, {"height": 224, "width": 224})
self.assertEqual(image_processor.codebook_size, {"height": 112, "width": 112})
self.assertEqual(image_processor.codebook_crop_size, {"height": 112, "width": 112})
image_processor = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, crop_size=84, codebook_size=33, codebook_crop_size=66
)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
self.assertEqual(image_processor.codebook_size, {"height": 33, "width": 33})
self.assertEqual(image_processor.codebook_crop_size, {"height": 66, "width": 66})
def test_call_pil(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, PIL.Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt")
# Test no bool masked pos
self.assertFalse("bool_masked_pos" in encoded_images)
expected_height, expected_width = self.image_processor_tester.get_expected_image_size()
self.assertEqual(
encoded_images.pixel_values.shape,
(1, self.image_processor_tester.num_channels, expected_height, expected_width),
)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt")
expected_height, expected_width = self.image_processor_tester.get_expected_image_size()
# Test no bool masked pos
self.assertFalse("bool_masked_pos" in encoded_images)
self.assertEqual(
encoded_images.pixel_values.shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),
)
def _test_call_framework(self, instance_class, prepare_kwargs):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, **prepare_kwargs)
for image in image_inputs:
self.assertIsInstance(image, instance_class)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt")
expected_height, expected_width = self.image_processor_tester.get_expected_image_size()
self.assertEqual(
encoded_images.pixel_values.shape,
(1, self.image_processor_tester.num_channels, expected_height, expected_width),
)
encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors="pt")
expected_height, expected_width = self.image_processor_tester.get_expected_image_size()
self.assertEqual(
encoded_images.pixel_values.shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),
)
expected_height, expected_width = self.image_processor_tester.get_expected_mask_size()
self.assertEqual(
encoded_images.bool_masked_pos.shape,
(
self.image_processor_tester.batch_size,
expected_height,
expected_width,
),
)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_height, expected_width = self.image_processor_tester.get_expected_image_size()
self.assertEqual(
encoded_images.shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),
)
# Test masking
encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors="pt")
expected_height, expected_width = self.image_processor_tester.get_expected_image_size()
self.assertEqual(
encoded_images.pixel_values.shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),
)
expected_height, expected_width = self.image_processor_tester.get_expected_mask_size()
self.assertEqual(
encoded_images.bool_masked_pos.shape,
(
self.image_processor_tester.batch_size,
expected_height,
expected_width,
),
)
def test_call_numpy(self):
self._test_call_framework(np.ndarray, prepare_kwargs={"numpify": True})
def test_call_numpy_4_channels(self):
self.image_processing_class.num_channels = 4
self._test_call_framework(np.ndarray, prepare_kwargs={"numpify": True})
self.image_processing_class.num_channels = 3
def test_call_pytorch(self):
self._test_call_framework(torch.Tensor, prepare_kwargs={"torchify": True})
def test_masking(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
random.seed(1234)
image_processing = image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_image_mask=True, return_tensors="pt")
self.assertEqual(encoded_images.bool_masked_pos.sum().item(), 75)
def test_codebook_pixels(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, PIL.Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_codebook_pixels=True, return_tensors="pt")
expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size()
self.assertEqual(
encoded_images.codebook_pixel_values.shape,
(1, self.image_processor_tester.num_channels, expected_height, expected_width),
)
# Test batched
encoded_images = image_processing(image_inputs, return_codebook_pixels=True, return_tensors="pt")
expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size()
self.assertEqual(
encoded_images.codebook_pixel_values.shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),
)
@require_vision
@require_torch
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = Image.open(
requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(
dummy_image, return_tensors="pt", return_codebook_pixels=True, return_image_mask=True
)
encoding_fast = image_processor_fast(
dummy_image, return_tensors="pt", return_codebook_pixels=True, return_image_mask=True
)
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(
encoding_slow.codebook_pixel_values, encoding_fast.codebook_pixel_values
)
| transformers/tests/models/flava/test_image_processing_flava.py/0 | {
"file_path": "transformers/tests/models/flava/test_image_processing_flava.py",
"repo_id": "transformers",
"token_count": 8494
} | 574 |
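The FLAVA image-processing tests above check output shapes that follow directly from the tester's size dictionaries and patch grid. A small worked example of that shape arithmetic, using the tester's defaults (values copied from the code above, nothing new assumed):

# Shape arithmetic behind FlavaImageProcessingTester's expectations.
size = {"height": 224, "width": 224}
codebook_size = {"height": 112, "width": 112}
input_size_patches = 14
batch_size, num_channels = 7, 3

expected_pixel_shape = (batch_size, num_channels, size["height"], size["width"])
expected_mask_shape = (batch_size, input_size_patches, input_size_patches)  # one flag per patch
expected_codebook_shape = (batch_size, num_channels, codebook_size["height"], codebook_size["width"])

assert expected_pixel_shape == (7, 3, 224, 224)
assert expected_mask_shape == (7, 14, 14)
assert expected_codebook_shape == (7, 3, 112, 112)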
# Copyright 2020 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from functools import lru_cache
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "funnel-transformer/small"
tokenizer_class = FunnelTokenizer
rust_tokenizer_class = FunnelTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
@classmethod
def setUpClass(cls):
super().setUpClass()
vocab_tokens = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@classmethod
@use_cache_if_possible
@lru_cache(maxsize=64)
def get_tokenizer(cls, pretrained_name=None, **kwargs):
pretrained_name = pretrained_name or cls.tmpdirname
return FunnelTokenizer.from_pretrained(pretrained_name, **kwargs)
@classmethod
@use_cache_if_possible
@lru_cache(maxsize=64)
def get_rust_tokenizer(cls, pretrained_name=None, **kwargs):
pretrained_name = pretrained_name or cls.tmpdirname
return FunnelTokenizerFast.from_pretrained(pretrained_name, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00e9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00e9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def test_token_type_ids(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
inputs = tokenizer("UNwant\u00e9d,running")
sentence_len = len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
inputs = tokenizer("UNwant\u00e9d,running", "UNwant\u00e9d,running")
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| transformers/tests/models/funnel/test_tokenization_funnel.py/0 | {
"file_path": "transformers/tests/models/funnel/test_tokenization_funnel.py",
"repo_id": "transformers",
"token_count": 1434
} | 575 |
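The Funnel tokenizer test above expects a distinctive token_type_ids pattern: the leading <cls> token gets type 2, the first sequence type 0, and the second sequence type 1. A tiny illustration of that expectation with an arbitrary example length:

# Funnel-style token type ids: <cls> -> 2, first segment -> 0, second segment -> 1.
sentence_len = 6  # arbitrary number of tokens per segment, excluding <cls>

single = [2] + [0] * sentence_len
pair = [2] + [0] * sentence_len + [1] * sentence_len

assert single == [2, 0, 0, 0, 0, 0, 0]
assert len(pair) == 1 + 2 * sentence_len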
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Gemma3n model."""
import tempfile
import unittest
import numpy as np
import pytest
from datasets import load_dataset
from parameterized import parameterized
from transformers import (
AutoModelForCausalLM,
AutoProcessor,
AutoTokenizer,
Gemma3nAudioConfig,
Gemma3nAudioFeatureExtractor,
Gemma3nConfig,
Gemma3nTextConfig,
GenerationConfig,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_flash_attn,
require_read_token,
require_torch,
require_torch_gpu,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
ModelTesterMixin,
_test_eager_matches_sdpa_inference,
floats_tensor,
ids_tensor,
)
from ..gemma.test_modeling_gemma import GemmaModelTester
if is_torch_available():
import torch
from transformers import (
Gemma3nAudioEncoder,
Gemma3nForCausalLM,
Gemma3nForConditionalGeneration,
Gemma3nModel,
Gemma3nTextModel,
)
class Gemma3nAudioModelTester:
def __init__(
self,
parent,
batch_size=2,
num_channels=32, # feature_size / input_feat_size
sampling_rate=16_000,
raw_audio_length=8_000,
is_training=True,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.sampling_rate = sampling_rate
self.raw_audio_length = raw_audio_length
self.is_training = is_training
def get_feature_extractor_config(self):
return {
"feature_size": self.num_channels,
"sampling_rate": self.sampling_rate,
"padding_value": 0.0,
"return_attention_mask": True,
"frame_length_ms": 32.0,
"hop_length_ms": 10.0,
"dither": 0.0, # Important for determinism
}
def get_audio_encoder_config(self):
return Gemma3nAudioConfig(
input_feat_size=self.num_channels,
hidden_size=32,
conf_num_attention_heads=4,
conf_num_hidden_layers=2,
sscp_conv_channel_size=(16, 8),
conf_conv_kernel_size=3,
conf_attention_chunk_size=4,
conf_attention_context_left=5,
)
def prepare_config_and_inputs_for_common(self):
# Prepare inputs for the audio encoder
feature_extractor_config = self.get_feature_extractor_config()
audio_encoder_config = self.get_audio_encoder_config()
np.random.seed(0)
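# Two synthetic clips: a deterministic sine tone and a shorter random-noise clip, so the batch also exercises padding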
raw_speech_1 = np.sin(2 * np.pi * 440 * np.linspace(0, 1, self.raw_audio_length)).astype(np.float32)
raw_speech_2 = np.random.randn(self.raw_audio_length // 2).astype(np.float32)
raw_speech = [raw_speech_1, raw_speech_2]
feature_extractor = Gemma3nAudioFeatureExtractor(**feature_extractor_config)
audio_inputs = feature_extractor(raw_speech, return_tensors="pt")
input_features = audio_inputs["input_features"]
# The encoder expects a padding mask (True for padding), while the feature extractor
# returns an attention mask (True for valid tokens). We must invert it.
input_features_mask = ~audio_inputs["input_features_mask"].to(torch.bool)
inputs_dict = {
"audio_mel": input_features,
"audio_mel_mask": input_features_mask,
}
return audio_encoder_config, inputs_dict
@unittest.skip("Skipped for now!")
@require_torch
class Gemma3nAudioModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (Gemma3nAudioEncoder,) if is_torch_available() else ()
test_pruning = False
test_head_masking = False
test_missing_keys = False
is_generative = False
_is_stateful = True
main_input_name = "audio_mel"
test_initialization = False
test_can_init_all_missing_weights = False
def setUp(self):
self.model_tester = Gemma3nAudioModelTester(self)
self.config_tester = ConfigTester(self, config_class=Gemma3nAudioConfig, hidden_size=37)
torch.manual_seed(0)
# The following values are golden outputs from a deterministic run of the components.
# They are used to ensure that changes to the code do not alter the numerical output.
# Generated with seeds np.random.seed(0) and torch.manual_seed(0).
self.expected_input_features_shape = (2, 48, 32)
self.expected_input_features_slice = np.array([-5.733152, -5.337127, -4.916284, -4.378989, -3.7622747])
self.expected_input_features_mask_shape = (2, 48)
self.expected_input_features_mask_slice = np.array([True, True, True, True, False])
self.expected_encoder_output_shape = (2, 3, 32)
self.expected_encoder_output_slice = torch.tensor([-0.4159, 0.6459, 0.6305, 2.2902, 0.9683])
self.expected_encoder_mask_shape = (2, 3)
self.expected_encoder_mask_slice = torch.tensor([False, False, True])
# Prepare a shared feature extractor and raw audio for the tests
self.feature_extractor = Gemma3nAudioFeatureExtractor(**self.model_tester.get_feature_extractor_config())
np.random.seed(0)
raw_speech_1 = np.sin(2 * np.pi * 440 * np.linspace(0, 1, self.model_tester.raw_audio_length)).astype(
np.float32
)
raw_speech_2 = np.random.randn(self.model_tester.raw_audio_length // 2).astype(np.float32)
self.raw_speech = [raw_speech_1, raw_speech_2]
@unittest.skip("Audio encoder does not support attention output")
def test_attention_outputs(self):
pass
@unittest.skip("Audio encoder does not support hidden state output")
def test_hidden_states_output(self):
pass
@unittest.skip("Audio encoder returns a tuple, not a ModelOutput object, skipping equivalence test.")
def test_model_outputs_equivalence(self):
pass
@unittest.skip("Audio encoder does not support retaining gradients on hidden states/attentions.")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip("Audio encoder does not have a concept of token embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip("Audio encoder does not have a concept of token embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip("This model has a complex downsampling scheme that is hard to test with the generic batching test.")
def test_batching_equivalence(self):
pass
def test_feature_extractor(self):
"""
Tests the feature extractor's output against pre-computed golden values.
This ensures the NumPy-based audio preprocessing is correct and consistent.
"""
audio_inputs = self.feature_extractor(
self.raw_speech, padding="longest", pad_to_multiple_of=128, return_tensors="np"
)
input_features = audio_inputs["input_features"]
self.assertEqual(input_features.shape, self.expected_input_features_shape)
np.testing.assert_allclose(input_features[0, 0, :5], self.expected_input_features_slice, rtol=1e-5, atol=1e-5)
print(input_features[0, 0, :5])
input_features_mask = audio_inputs["input_features_mask"]
self.assertEqual(input_features_mask.shape, self.expected_input_features_mask_shape)
# The second audio sample is shorter (22 frames vs 48), so its mask should become False at index 22
np.testing.assert_array_equal(input_features_mask[1, 21:26], self.expected_input_features_mask_slice)
def test_audio_encoder(self):
"""
Tests the audio encoder's forward pass against pre-computed golden values.
This ensures the PyTorch-based audio encoding model is correct and consistent.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = Gemma3nAudioEncoder(config).to(torch_device).eval()
with torch.no_grad():
encoder_output, encoder_mask = model(**inputs_dict)
print(encoder_output[0, 0, :5])
# Check output encodings
self.assertEqual(encoder_output.shape, self.expected_encoder_output_shape)
torch.testing.assert_close(
encoder_output[0, 0, :5], self.expected_encoder_output_slice.to(torch_device), rtol=1e-4, atol=1e-4
)
# Check output mask (True means padded)
# Second sample has 22 feature frames. After downsampling by 4 (conv) -> 5 frames. After downsampling by 4 (reduction) -> 1 frame.
# So the mask should be [False, True, True]
self.assertEqual(encoder_mask.shape, self.expected_encoder_mask_shape)
torch.testing.assert_close(encoder_mask[1, :], self.expected_encoder_mask_slice.to(torch_device))
class Gemma3nTextModelTester(GemmaModelTester):
activation_sparsity_pattern = None
forced_config_args = ["activation_sparsity_pattern"]
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
vocab_size_per_layer_input=99,
hidden_size=16,
hidden_size_per_layer_input=16,
num_hidden_layers=4, # override to correctly test sharing cache pattern
num_kv_shared_layers=2, # important to override
layer_types=[
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
], # similarly we want to test sharing on both types
num_attention_heads=2,
num_key_value_heads=2,
altup_num_inputs=2,
intermediate_size=21,
hidden_activation="gelu_pytorch_tanh",
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
is_decoder=False,
):
self._verify_model_attributes()
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.vocab_size_per_layer_input = vocab_size_per_layer_input
self.hidden_size = hidden_size
self.hidden_size_per_layer_input = hidden_size_per_layer_input
self.num_hidden_layers = num_hidden_layers
self.num_kv_shared_layers = num_kv_shared_layers
self.layer_types = layer_types
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.altup_num_inputs = altup_num_inputs
self.intermediate_size = intermediate_size
self.hidden_activation = hidden_activation
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.head_dim = self.hidden_size // self.num_attention_heads
self.is_decoder = is_decoder
if is_torch_available():
config_class = Gemma3nTextConfig
model_class = Gemma3nTextModel
for_causal_lm_class = Gemma3nForCausalLM
@require_torch
class Gemma3nTextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (Gemma3nTextModel, Gemma3nForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (Gemma3nForCausalLM,) if is_torch_available() else ()
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = Gemma3nTextModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=Gemma3nConfig,
hidden_size=37,
text_config={"activation_sparsity_pattern": None},
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
):
"Gemma3n has special hidden states shape with 1 additional dim (which is then reduced with projections)"
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
[True] * len(hidden_states),
)
self.assertEqual(len(hidden_states), (output_length - prompt_length))
# When `output_hidden_states=True`, each iteration of generate appends the hidden states corresponding to the
# new token(s)
# NOTE: `HybridCache` may have different lengths on different layers, if this test starts failing add more
# elaborate checks
for generated_length, iter_hidden_states in enumerate(hidden_states):
# regardless of using cache, the first forward pass will have the full prompt as input
if use_cache and generated_length > 0:
model_input_length = 1
else:
model_input_length = prompt_length + generated_length
expected_shape = (config.altup_num_inputs, batch_size, model_input_length, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
[expected_shape] * len(iter_hidden_states),
)
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
def test_eager_matches_sdpa_inference(
self,
name,
dtype,
padding_side,
use_attention_mask,
output_attentions,
enable_kernels,
):
"We need to relax a bit the `atols` for fp32 here due to the altup projections"
atols = {
("cpu", False, torch.float32): 1e-3, # this was relaxed
("cpu", False, torch.float16): 5e-3,
("cpu", False, torch.bfloat16): 1e-2,
("cpu", True, torch.float32): 1e-3, # this was relaxed
("cpu", True, torch.float16): 5e-3,
("cpu", True, torch.bfloat16): 1e-2,
("cuda", False, torch.float32): 1e-3, # this was relaxed
("cuda", False, torch.bfloat16): 1e-2,
("cuda", False, torch.float16): 5e-3,
("cuda", True, torch.float32): 1e-3, # this was relaxed
("cuda", True, torch.bfloat16): 1e-2,
("cuda", True, torch.float16): 5e-3,
}
_test_eager_matches_sdpa_inference(
self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels, atols=atols
)
@pytest.mark.generate
@unittest.skip(
"Gemma3n has a special shape for hidden states (due to per-layer projs) which is not compatible with contrastive decoding"
)
def test_contrastive_generate(self):
pass
@pytest.mark.generate
@unittest.skip(
"Gemma3n has a special shape for hidden states (due to per-layer projs) which is not compatible with contrastive decoding"
)
def test_contrastive_generate_dict_outputs_use_cache(self):
pass
@pytest.mark.generate
@unittest.skip(
"Gemma3n has a special shape for hidden states (due to per-layer projs) which is not compatible with contrastive decoding"
)
def test_contrastive_generate_low_memory(self):
pass
@pytest.mark.generate
@unittest.skip(
"Gemma3n has a special shape for hidden states (due to per-layer projs) which is not compatible with dola decoding"
)
def test_dola_decoding_sample(self):
pass
@pytest.mark.generate
@unittest.skip("Gemma3n does not support QuantizedCache as it performs cache manipulation in the forward pass")
def test_generate_with_quant_cache(self):
pass
@unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
class Gemma3nVision2TextModelTester:
text_config = {"activation_sparsity_pattern": None}
forced_config_args = ["text_config"]
def __init__(
self,
parent,
mm_tokens_per_image=2,
image_token_index=1,
boi_token_index=2,
eoi_token_index=3,
seq_length=25,
is_training=True,
vision_config={
"use_labels": True,
"image_size": 20,
"patch_size": 5,
"num_channels": 3,
"is_training": True,
"hidden_size": 32,
"num_key_value_heads": 1,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
use_cache=False,
):
self.parent = parent
# `image_token_index` is kept small so the "resize_embeddings" test passes, do not modify
self.mm_tokens_per_image = mm_tokens_per_image
self.image_token_index = image_token_index
self.boi_token_index = boi_token_index
self.eoi_token_index = eoi_token_index
self.llm_tester = Gemma3nTextModelTester(self.parent)
self.text_config = self.llm_tester.get_config()
self.vision_config = vision_config
self.seq_length = seq_length
self.pad_token_id = self.text_config.pad_token_id
self.num_hidden_layers = self.text_config.num_hidden_layers
self.vocab_size = self.text_config.vocab_size
self.hidden_size = self.text_config.hidden_size
self.num_attention_heads = self.text_config.num_attention_heads
self.is_training = is_training
self.batch_size = 3
self.num_channels = vision_config["num_channels"]
self.image_size = vision_config["image_size"]
self.encoder_seq_length = seq_length
self.use_cache = use_cache
def get_config(self):
return Gemma3nConfig(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_index=self.image_token_index,
boi_token_index=self.boi_token_index,
eoi_token_index=self.eoi_token_index,
mm_tokens_per_image=self.mm_tokens_per_image,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
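# shift ids into [1, vocab_size) so no token equals the pad id and the attention mask below is all ones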
attention_mask = input_ids.ne(self.pad_token_id).to(torch_device)
# set the first token to be the image token, and ensure that no other tokens are image tokens
# do not change this unless you modified image size or patch size
input_ids[input_ids == config.image_token_index] = self.pad_token_id
input_ids[:, :1] = config.image_token_index
token_type_ids = torch.zeros_like(input_ids)
token_type_ids[input_ids == config.image_token_index] = 1
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
return config, inputs_dict
@unittest.skip("Skipped for now!")
@require_torch
class Gemma3nVision2TextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (Gemma3nModel, Gemma3nForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (Gemma3nForConditionalGeneration,) if is_torch_available() else ()
test_headmasking = False
test_pruning = False
test_missing_keys = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
# MP works but offload doesn't work when the SigLIP MultiheadAttention is offloaded
# TODO: One potential solution would be to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"]
# in the dispatch_model function
test_cpu_offload = False
test_disk_offload_safetensors = False
test_disk_offload_bin = False
def setUp(self):
self.model_tester = Gemma3nVision2TextModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=Gemma3nConfig,
hidden_size=37,
text_config={"activation_sparsity_pattern": None},
)
@unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(
reason="HybridCache can't be gathered because it is not iterable. Adding a simple iter and dumping `distributed_iterator`"
" as in Dynamic Cache doesnt work. NOTE: @gante all cache objects would need better compatibility with multi gpu setting"
)
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("Failing because of unique cache (HybridCache)")
def test_model_outputs_equivalence(self, **kwargs):
pass
@parameterized.expand([("random",), ("same",)])
@pytest.mark.generate
@unittest.skip("Gemma3n has HybridCache which is not compatible with assisted decoding")
def test_assisted_decoding_matches_greedy_search(self, assistant_type):
pass
@unittest.skip("Gemma3n has HybridCache which is not compatible with assisted decoding")
def test_prompt_lookup_decoding_matches_greedy_search(self, assistant_type):
pass
@pytest.mark.generate
@unittest.skip("Gemma3n has HybridCache which is not compatible with assisted decoding")
def test_assisted_decoding_sample(self):
pass
@unittest.skip("Gemma3n has HybridCache which is not compatible with dola decoding")
def test_dola_decoding_sample(self):
pass
@unittest.skip("Gemma3n has HybridCache and doesn't support continue from past kv")
def test_generate_continue_from_past_key_values(self):
pass
@unittest.skip("Gemma3n has HybridCache and doesn't support low_memory generation")
def test_beam_search_low_memory(self):
pass
@unittest.skip("Gemma3n has HybridCache and doesn't support contrastive generation")
def test_contrastive_generate(self):
pass
@unittest.skip("Gemma3n has HybridCache and doesn't support contrastive generation")
def test_contrastive_generate_dict_outputs_use_cache(self):
pass
@unittest.skip("Gemma3n has HybridCache and doesn't support contrastive generation")
def test_contrastive_generate_low_memory(self):
pass
@unittest.skip("Gemma3n has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.")
def test_generate_with_static_cache(self):
pass
@unittest.skip("Gemma3n has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.")
def test_generate_from_inputs_embeds_with_static_cache(self):
pass
@unittest.skip(
reason="Siglip (vision backbone) uses the same initialization scheme as the Flax original implementation"
)
def test_initialization(self):
pass
@unittest.skip(
reason="Siglip has no FLEX attention, and we don't have a proper way to set/test attn in VLMs. TODO @raushan"
)
def test_flex_attention_with_grads(self):
pass
@unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
def test_automodelforcausallm(self):
"""
Regression test for #36741 -- make sure `AutoModelForCausalLM` works with a Gemma3n config, i.e. that
`AutoModelForCausalLM.from_pretrained` pulls the text config before loading the model
"""
config = self.model_tester.get_config()
model = Gemma3nForConditionalGeneration(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
for_causal_lm = AutoModelForCausalLM.from_pretrained(tmp_dir)
self.assertIsInstance(for_causal_lm, Gemma3nForCausalLM)
@unittest.skip("Skipped for now!")
@slow
@require_torch_gpu
@require_read_token
class Gemma3nIntegrationTest(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("Google/gemma-3n-E4B-it", padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
self.messages = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{"type": "image", "url": url},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
audio_ds = load_dataset(
"etechgrid/28.5k_wavfiles_dataset", "default", data_files="wav_dataset/103-1240-0000.wav"
)
self.audio_file_path = audio_ds["train"][0]["audio"]["path"]
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_model_4b_bf16(self):
model_id = "Google/gemma-3n-E4B-it"
model = Gemma3nForConditionalGeneration.from_pretrained(
model_id, low_cpu_mem_usage=True, dtype=torch.bfloat16
).to(torch_device)
inputs = self.processor.apply_chat_template(
self.messages,
tokenize=True,
return_dict=True,
return_tensors="pt",
add_generation_prompt=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear blue water and a blue sky in the background. It looks like'] # fmt: skip
self.assertEqual(output_text, EXPECTED_TEXTS)
def test_model_with_audio(self):
"""
Tests the full model pipeline with batched audio inputs provided as file paths.
This ensures the processor correctly loads and processes audio files.
"""
model_id = "Google/gemma-3n-E4B-it"
model = Gemma3nForConditionalGeneration.from_pretrained(
model_id, low_cpu_mem_usage=True, dtype=torch.bfloat16
).to(torch_device)
messages = [
[
{
"role": "user",
"content": [
{"type": "text", "text": "Transcribe the following speech segment in English:"},
{"type": "audio", "audio": str(self.audio_file_path)},
],
}
],
]
inputs = self.processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
padding=True,
return_tensors="pt",
).to(torch_device, dtype=model.dtype)
input_len = inputs["input_ids"].shape[-1]
output = model.generate(**inputs, max_new_tokens=16, do_sample=False)
output = output[:, input_len:]
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = ["Chapter 1. Mrs. Rachel Lind is surprised.\n\nMrs. Rachel Lind"]
self.assertEqual(output_text, EXPECTED_TEXTS)
def test_model_4b_batch(self):
model_id = "Google/gemma-3n-E4B-it"
model = Gemma3nForConditionalGeneration.from_pretrained(
model_id, low_cpu_mem_usage=False, dtype=torch.bfloat16
).to(torch_device)
messages_2 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
},
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
{"type": "text", "text": "Are these images identical?"},
],
},
]
inputs = self.processor.apply_chat_template(
[self.messages, messages_2],
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
add_generation_prompt=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = [
'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear turquoise water and a blue sky in the background. It looks like',
"user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Image 1:** Shows a cow"
] # fmt: skip
self.assertEqual(output_text, EXPECTED_TEXTS)
def test_model_4b_crops(self):
model_id = "Google/gemma-3n-E4B-it"
model = Gemma3nForConditionalGeneration.from_pretrained(
model_id, low_cpu_mem_usage=True, dtype=torch.bfloat16
).to(torch_device)
crop_config = {
"images_kwargs": {
"do_pan_and_scan": True,
"pan_and_scan_max_num_crops": 448,
"pan_and_scan_min_crop_size": 32,
"pan_and_scan_min_ratio_to_activate": 0.3,
}
}
inputs = self.processor.apply_chat_template(
self.messages,
tokenize=True,
return_dict=True,
return_tensors="pt",
add_generation_prompt=True,
**crop_config,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_NUM_IMAGES = 3  # one for the original image and two crops
EXPECTED_TEXTS = ['user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a beach with a turquoise ocean and blue sky in the background.'] # fmt: skip
self.assertEqual(len(inputs["pixel_values"]), EXPECTED_NUM_IMAGES)
self.assertEqual(output_text, EXPECTED_TEXTS)
def test_model_4b_multiimage(self):
model_id = "Google/gemma-3n-E4B-it"
model = Gemma3nForConditionalGeneration.from_pretrained(
model_id, low_cpu_mem_usage=True, dtype=torch.bfloat16
).to(torch_device)
messages = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
{"type": "text", "text": "What do you see here?"},
],
},
]
inputs = self.processor.apply_chat_template(
messages,
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
add_generation_prompt=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image:\n\n**Overall Scene:**\n\nIt looks like a street scene in a vibrant,"] # fmt: skip
self.assertEqual(output_text, EXPECTED_TEXTS)
def test_model_1b_text_only(self):
model_id = "google/gemma-3-1b-it"
model = Gemma3nForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, dtype=torch.bfloat16).to(
torch_device
)
tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
inputs = tokenizer("Write a poem about Machine Learning.", return_tensors="pt").to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a river deep,\nWith patterns hidden, secrets sleep.\nA neural net, a watchful eye,\nLearning'] # fmt: skip
self.assertEqual(output_text, EXPECTED_TEXTS)
# TODO: raushan FA2 generates gibberish for no reason, check later
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
def test_model_4b_flash_attn(self):
model_id = "Google/gemma-3n-E4B-it"
model = Gemma3nForConditionalGeneration.from_pretrained(
model_id, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
).to(torch_device)
inputs = self.processor.apply_chat_template(
self.messages,
tokenize=True,
return_dict=True,
return_tensors="pt",
add_generation_prompt=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown and white cow standing on a sandy beach next to a turquoise ocean. It looks like a very sunny and'] # fmt: skip
self.assertEqual(output_text, EXPECTED_TEXTS)
@parameterized.expand([("flash_attention_2",), ("sdpa",), ("eager",)])
def test_generation_beyond_sliding_window(self, attn_implementation: str):
"""Test that we can correctly generate beyond the sliding window. This is non trivial as
we need to correctly slice the attention mask in all cases (because we use a HybridCache).
Outputs for every attention functions should be coherent and identical.
"""
model_id = "google/gemma-3-1b-it"
input_text = [
"This is a nice place. " * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens
"A list of colors: red, blue", # This will almost all be padding tokens
]
tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)
model = AutoModelForCausalLM.from_pretrained(
model_id, attn_implementation=attn_implementation, dtype=torch.float16
).to(torch_device)
# Make sure prefill is larger than sliding window
input_size = inputs.input_ids.shape[-1]
self.assertTrue(input_size > model.config.sliding_window)
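# keep only the newly generated tokens (drop the prompt)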
out = model.generate(**inputs, max_new_tokens=20)[:, input_size:]
output_text = tokenizer.batch_decode(out)
EXPECTED_COMPLETIONS = [" and I'm going to take a walk.\n\nI really enjoy the scenery, and I'", ", green, yellow, orange, purple, brown, black, white, gray.\n\nI'"] # fmt: skip
self.assertEqual(output_text, EXPECTED_COMPLETIONS)
def test_generation_beyond_sliding_window_with_generation_config(self):
"""
Same as `test_generation_beyond_sliding_window`, but passing a GenerationConfig. Regression test for #36684 --
ensures `cache_implementation='hybrid'` is correctly inherited from the base `model.generation_config`.
"""
model_id = "google/gemma-3-1b-it"
attn_implementation = "sdpa"
input_text = [
"This is a nice place. " * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens
"A list of colors: red, blue", # This will almost all be padding tokens
]
tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)
model = AutoModelForCausalLM.from_pretrained(
model_id, attn_implementation=attn_implementation, dtype=torch.float16
).to(torch_device)
# Make sure prefill is larger than sliding window
input_size = inputs.input_ids.shape[-1]
self.assertTrue(input_size > model.config.sliding_window)
generation_config = GenerationConfig(max_new_tokens=20)
out = model.generate(**inputs, generation_config=generation_config)[:, input_size:]
output_text = tokenizer.batch_decode(out)
EXPECTED_COMPLETIONS = [" and I'm going to take a walk.\n\nI really enjoy the scenery, and I'", ", green, yellow, orange, purple, brown, black, white, gray.\n\nI'"] # fmt: skip
self.assertEqual(output_text, EXPECTED_COMPLETIONS)
| transformers/tests/models/gemma3n/test_modeling_gemma3n.py/0 | {
"file_path": "transformers/tests/models/gemma3n/test_modeling_gemma3n.py",
"repo_id": "transformers",
"token_count": 17312
} | 576 |
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GLM-4.1V model."""
import copy
import gc
import unittest
from transformers import (
AutoProcessor,
Glm4vMoeConfig,
Glm4vMoeForConditionalGeneration,
Glm4vMoeModel,
is_torch_available,
)
from transformers.testing_utils import (
require_flash_attn,
require_torch,
require_torch_gpu,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
class Glm4vMoeVisionText2TextModelTester:
def __init__(
self,
parent,
batch_size=3,
seq_length=7,
num_channels=3,
ignore_index=-100,
image_size=112,
video_start_token_id=3,
video_end_token_id=4,
image_start_token_id=5,
image_end_token_id=6,
image_token_id=7,
video_token_id=8,
is_training=True,
text_config={
"vocab_size": 99,
"hidden_size": 16,
"intermediate_size": 22,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 1,
"output_channels": 64,
"hidden_act": "silu",
"max_position_embeddings": 512,
"rope_scaling": {"type": "default", "mrope_section": [1, 1]},
"rope_theta": 10000,
"tie_word_embeddings": True,
"bos_token_id": 0,
"eos_token_id": 0,
"pad_token_id": 0,
"n_routed_experts": 8,
"n_shared_experts": 1,
"n_group": 1,
"topk_group": 1,
"num_experts_per_tok": 8,
},
vision_config={
"depth": 2,
"hidden_act": "silu",
"hidden_size": 48,
"out_hidden_size": 16,
"intermediate_size": 22,
"patch_size": 14,
"spatial_merge_size": 1,
"temporal_patch_size": 2,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.bos_token_id = text_config["bos_token_id"]
self.eos_token_id = text_config["eos_token_id"]
self.pad_token_id = text_config["pad_token_id"]
self.video_start_token_id = video_start_token_id
self.video_end_token_id = video_end_token_id
self.image_start_token_id = image_start_token_id
self.image_end_token_id = image_end_token_id
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.text_config = text_config
self.vision_config = vision_config
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.is_training = is_training
self.hidden_size = text_config["hidden_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.vocab_size = text_config["vocab_size"]
self.num_image_tokens = 64
self.seq_length = seq_length + self.num_image_tokens
self.n_routed_experts = text_config["n_routed_experts"]
self.n_shared_experts = text_config["n_shared_experts"]
self.num_experts_per_tok = text_config["num_experts_per_tok"]
self.n_group = text_config["n_group"]
self.topk_group = text_config["topk_group"]
def get_config(self):
return Glm4vMoeConfig(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_id=self.image_token_id,
video_token_id=self.video_token_id,
video_start_token_id=self.video_start_token_id,
video_end_token_id=self.video_end_token_id,
image_start_token_id=self.image_start_token_id,
image_end_token_id=self.image_end_token_id,
)
def prepare_config_and_inputs(self):
config = self.get_config()
patch_size = config.vision_config.patch_size
temporal_patch_size = config.vision_config.temporal_patch_size
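# pixel values are flattened patches: (total patches across the batch, channels * patch_size**2 * temporal_patch_size)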
pixel_values = floats_tensor(
[
self.batch_size * (self.image_size**2) // (patch_size**2),
self.num_channels * (patch_size**2) * temporal_patch_size,
]
)
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
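# scrub any randomly sampled special tokens so the image placeholders can be placed deterministically below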
input_ids[input_ids == self.video_token_id] = self.pad_token_id
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[input_ids == self.video_start_token_id] = self.pad_token_id
input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
input_ids[input_ids == self.video_end_token_id] = self.pad_token_id
input_ids[input_ids == self.image_end_token_id] = self.pad_token_id
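# lay out one image per sample: <image_start>, num_image_tokens placeholders, then <image_end>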
input_ids[:, 0] = self.image_start_token_id
input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id
patch_size = config.vision_config.patch_size
patches_per_side = self.image_size // patch_size
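# image_grid_thw stores the (temporal, height, width) patch grid of each image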
inputs_dict = {
"pixel_values": pixel_values,
"image_grid_thw": torch.tensor([[1, patches_per_side, patches_per_side]] * self.batch_size),
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class Glm4vMoeModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (Glm4vMoeModel, Glm4vMoeForConditionalGeneration) if is_torch_available() else ()
test_pruning = False
test_head_masking = False
test_torchscript = False
model_split_percents = [0.7, 0.9] # model too big to split at 0.5
_is_composite = True
def setUp(self):
self.model_tester = Glm4vMoeVisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Glm4vMoeConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
# Glm4vMoe has images shaped as (bs*patch_len, dim) so we can't slice to batches in generate
def prepare_config_and_inputs_for_generate(self, batch_size=2):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# We don't want a few model inputs in our model input dictionary for generation tests
input_keys_to_ignore = [
# we don't want to mask attention heads
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
# we don't want encoder-decoder models to start from filled decoder ids
"decoder_input_ids",
"decoder_attention_mask",
# we'll set cache use in each test differently
"use_cache",
# Ignore labels if it is in the input dict
"labels",
# model-specific exceptions should overload/overwrite this function
]
# The diff from the general `prepare_config_and_inputs_for_generate` lies here
patch_size = config.vision_config.patch_size
filtered_image_length = batch_size * (self.model_tester.image_size**2) // (patch_size**2)
filtered_inputs_dict = {
k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
for k, v in inputs_dict.items()
if k not in input_keys_to_ignore
}
filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][:filtered_image_length]
# It is important to set `eos_token_id` to `None` to avoid early stopping (it would break length-based checks)
text_gen_config = config.get_text_config(decoder=True)
if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
text_gen_config.pad_token_id = (
text_gen_config.eos_token_id
if isinstance(text_gen_config.eos_token_id, int)
else text_gen_config.eos_token_id[0]
)
text_gen_config.eos_token_id = None
text_gen_config.forced_eos_token_id = None
return config, filtered_inputs_dict
@unittest.skip(reason="No available kernels - not supported")
def test_sdpa_can_dispatch_on_flash(self):
pass
@unittest.skip(reason="Size mismatch")
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("GLM4's moe is not compatible `token_indices, weight_indices = torch.where(mask)`.")
def test_generate_compilation_all_outputs(self):
pass
@unittest.skip("Error with compilation")
def test_generate_from_inputs_embeds_with_static_cache(self):
pass
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
del inputs["image_grid_thw"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)[0]
def test_inputs_embeds_matches_input_ids(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
del inputs["image_grid_thw"]
inputs_embeds = model.get_input_embeddings()(input_ids)
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
torch.testing.assert_close(out_embeds, out_ids)
@require_torch
class Glm4vMoeIntegrationTest(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("zai-org/GLM-4.5V")
self.message = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
},
{"type": "text", "text": "What kind of dog is this?"},
],
}
]
self.message2 = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
},
{"type": "text", "text": "What kind of dog is this?"},
],
}
]
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
@slow
def test_small_model_integration_test(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained("zai-org/GLM-4.5V", dtype="auto", device_map="auto")
inputs = self.processor.apply_chat_template(
self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
)
expected_input_ids = [151331, 151333, 151336, 198, 151339, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343] # fmt: skip
assert expected_input_ids == inputs.input_ids[0].tolist()[:17]
expected_pixel_slice = torch.tensor(
[
[-0.0988, -0.0842, -0.0842],
[-0.5660, -0.5514, -0.4200],
[-0.0259, -0.0259, -0.0259],
[-0.1280, -0.0988, -0.2010],
[-0.4638, -0.5806, -0.6974],
[-1.2083, -1.2229, -1.2083],
],
dtype=torch.float32,
device="cpu",
)
assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3)
# verify generation
inputs = inputs.to(torch_device)
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks"
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_batch(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained("zai-org/GLM-4.5V", dtype="auto", device_map="auto")
batch_messages = [self.message] * 2
inputs = self.processor.apply_chat_template(
batch_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks",
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks"
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_with_video(self):
processor = AutoProcessor.from_pretrained("zai-org/GLM-4.5V", max_image_size={"longest_edge": 50176})
model = Glm4vMoeForConditionalGeneration.from_pretrained(
"zai-org/GLM-4.5V", dtype=torch.float16, device_map="auto"
)
questions = ["Describe this video."] * 2
video_urls = [
"https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4"
] * 2
messages = [
[
{
"role": "user",
"content": [
{
"type": "video",
"video": video_url,
},
{"type": "text", "text": question},
],
}
]
for question, video_url in zip(questions, video_urls)
]
inputs = processor.apply_chat_template(
messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
"\n012345Describe this video.\n<think>Got it, let's analyze the video. First, the scene is a room with a wooden floor, maybe a traditional Japanese room with tatami",
"\n012345Describe this video.\n<think>Got it, let's analyze the video. First, the scene is a room with a wooden floor, maybe a traditional Japanese room with tatami"
] # fmt: skip
self.assertEqual(
processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_expand(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained("zai-org/GLM-4.5V", dtype="auto", device_map="auto")
inputs = self.processor.apply_chat_template(
self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False, num_beams=2, num_return_sequences=2)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat, specifically"
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_batch_wo_image(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained("zai-org/GLM-4.5V", dtype="auto", device_map="auto")
message_wo_image = [
{"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
]
batched_messages = [self.message, message_wo_image]
inputs = self.processor.apply_chat_template(
batched_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks",
'\nWho are you?\n<think>Got it, the user is asking "Who are you?" I need to respond appropriately. First, I should clarify that I\'m an AI assistant'
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_batch_different_resolutions(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained("zai-org/GLM-4.5V", dtype="auto", device_map="auto")
batched_messages = [self.message, self.message2]
inputs = self.processor.apply_chat_template(
batched_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks",
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but"
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
@require_flash_attn
@require_torch_gpu
def test_small_model_integration_test_batch_flashatt2(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained(
"zai-org/GLM-4.5V",
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
device_map="auto",
)
batched_messages = [self.message, self.message2]
inputs = self.processor.apply_chat_template(
batched_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture has a stocky build, thick fur, and a face that's",
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but"
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
@require_flash_attn
@require_torch_gpu
def test_small_model_integration_test_batch_wo_image_flashatt2(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained(
"zai-org/GLM-4.5V",
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
device_map="auto",
)
message_wo_image = [
{"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
]
batched_messages = [self.message, message_wo_image]
inputs = self.processor.apply_chat_template(
batched_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks",
'\nWho are you?\n<think>Got it, let\'s look at the question. The user is asking "Who are you?" which is a common question when someone meets an AI'
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
| transformers/tests/models/glm4v_moe/test_modeling_glm4v_moe.py/0 | {
"file_path": "transformers/tests/models/glm4v_moe/test_modeling_glm4v_moe.py",
"repo_id": "transformers",
"token_count": 10889
} | 577 |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GPTNeoX model."""
import copy
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, DynamicCache, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXRotaryEmbedding
class GPTNeoXModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=64,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.pad_token_id = vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_labels = None
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return config, input_ids, input_mask, token_labels
def get_config(self):
return GPTNeoXConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def prepare_config_and_inputs_for_decoder(self):
config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
config.is_decoder = True
return config, input_ids, input_mask, token_labels
def create_and_check_model(self, config, input_ids, input_mask):
model = GPTNeoXModel(config=config)
model.to(torch_device)
model.eval()
_ = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
config.add_cross_attention = True
model = GPTNeoXModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
model = GPTNeoXForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
config.num_labels = self.num_labels
model = GPTNeoXForQuestionAnswering(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
config.num_labels = self.num_labels
model = GPTNeoXForSequenceClassification(config)
model.to(torch_device)
model.eval()
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
config.num_labels = self.num_labels
model = GPTNeoXForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
config.is_decoder = True
model = GPTNeoXForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
output_from_no_past = output_from_no_past["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_cached_forward_with_and_without_attention_mask(self, config, input_ids, *args):
# Relevant issue: https://github.com/huggingface/transformers/issues/31943
model = GPTNeoXModel(config)
model.to(torch_device)
model.eval()
# We want this for SDPA, eager works with a `None` attention mask
assert model.config._attn_implementation == "sdpa", (
"This test assumes the model to have the SDPA implementation for its attention calculations."
)
# Prepare cache and non_cache input, needs a full attention mask
cached_len = input_ids.shape[-1] // 2
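        # the first half of the tokens is pre-filled into the cache; the remaining half is fed afterwards with (and without) the full mask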
input_mask = torch.ones(size=input_ids.size()).to(torch_device)
cache_inputs = {"input_ids": input_ids[:, :cached_len], "attention_mask": input_mask[:, :cached_len]}
non_cache_inputs = {"input_ids": input_ids[:, cached_len:], "attention_mask": input_mask}
        def copy_cache(cache: DynamicCache):
            """Deep copy a DynamicCache to reuse the same one multiple times."""
            new_cache = DynamicCache()
            for i in range(len(cache)):
                new_cache.update(cache.layers[i].keys.clone(), cache.layers[i].values.clone(), i)
            return new_cache
# Cached forward once with the attention mask provided and the other time without it (which should assume full attention)
# We need to run both on a copy of the cache, otherwise it is modified in-place
cache_outputs = model(**cache_inputs)
cache = cache_outputs.past_key_values
full_outputs_with_attention_mask = model(
**non_cache_inputs, past_key_values=copy_cache(cache)
).last_hidden_state
full_outputs_without_attention_mask = model(
non_cache_inputs["input_ids"], past_key_values=copy_cache(cache)
).last_hidden_state
self.parent.assertTrue(
torch.allclose(full_outputs_with_attention_mask, full_outputs_without_attention_mask, atol=1e-5)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, token_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = False
test_missing_keys = False
test_model_parallel = False
test_head_masking = False
def setUp(self):
self.model_tester = GPTNeoXModelTester(self)
self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(config, input_ids, input_mask)
def test_model_as_decoder(self):
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
def test_model_as_decoder_with_default_input_mask(self):
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
def test_decoder_model_past_large_inputs(self):
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
def test_model_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_model_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_model_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_model_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_cached_forward_with_and_without_attention_mask(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_cached_forward_with_and_without_attention_mask(*config_and_inputs)
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@parameterized.expand([("linear",), ("dynamic",)])
def test_model_rope_scaling_from_config(self, scaling_type):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
short_input = ids_tensor([1, 10], config.vocab_size)
long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
original_model = GPTNeoXModel(config)
original_model.to(torch_device)
original_model.eval()
original_short_output = original_model(short_input).last_hidden_state
original_long_output = original_model(long_input).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
config.rope_scaling = {"type": scaling_type, "factor": 10.0}
scaled_model = GPTNeoXModel(config)
scaled_model.to(torch_device)
scaled_model.eval()
scaled_short_output = scaled_model(short_input).last_hidden_state
scaled_long_output = scaled_model(long_input).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
def test_model_rope_scaling(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
scaling_factor = 10
short_input_length = 10
long_input_length = int(config.max_position_embeddings * 1.5)
# Inputs
x = torch.randn(
1, dtype=torch.float32, device=torch_device
) # used exclusively to get the dtype and the device
position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
position_ids_short = position_ids_short.unsqueeze(0)
position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
position_ids_long = position_ids_long.unsqueeze(0)
# Sanity check original RoPE
original_rope = GPTNeoXRotaryEmbedding(config).to(torch_device)
original_cos_short, original_sin_short = original_rope(x, position_ids_short)
original_cos_long, original_sin_long = original_rope(x, position_ids_long)
torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])
# Sanity check linear RoPE scaling
# New position "x" should match original position with index "x/scaling_factor"
config.rope_scaling = {"type": "linear", "factor": scaling_factor}
linear_scaling_rope = GPTNeoXRotaryEmbedding(config).to(torch_device)
linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short)
linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long)
torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
for new_position in range(0, long_input_length, scaling_factor):
original_position = int(new_position // scaling_factor)
torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])
# Sanity check Dynamic NTK RoPE scaling
# Scaling should only be observed after a long input is fed. We can observe that the frequencies increase
# with scaling_factor (or that `inv_freq` decreases)
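        # (dynamic NTK rescales the rotary base only once the sequence length exceeds max_position_embeddings, so short inputs are untouched)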
config.rope_scaling = {"type": "dynamic", "factor": scaling_factor}
ntk_scaling_rope = GPTNeoXRotaryEmbedding(config).to(torch_device)
ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short)
ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long)
torch.testing.assert_close(ntk_cos_short, original_cos_short)
torch.testing.assert_close(ntk_sin_short, original_sin_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(ntk_cos_long, original_cos_long)
with self.assertRaises(AssertionError):
torch.testing.assert_close(ntk_sin_long, original_sin_long)
self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all())
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_gptneox(self):
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
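        # run the same greedy generation with gradient checkpointing enabled and disabled; both must produce the expected text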
for checkpointing in [True, False]:
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(torch_device)
inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The Hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
output_str = tokenizer.batch_decode(output_ids)[0]
self.assertEqual(output_str, expected_output)
@slow
def test_lm_generate_flex_attn_gptneox(self):
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
for checkpointing in [True, False]:
model = GPTNeoXForCausalLM.from_pretrained(
"EleutherAI/pythia-410m-deduped", attn_implementation="flex_attention"
)
self.assertTrue(model.config._attn_implementation == "flex_attention")
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(torch_device)
inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The Hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
output_str = tokenizer.batch_decode(output_ids)[0]
self.assertEqual(output_str, expected_output)
def pythia_integration_test(self):
model_name_or_path = "EleutherAI/pythia-70m"
model = GPTNeoXForCausalLM.from_pretrained(model_name_or_path, dtype=torch.float16).to(torch_device)
EXPECTED_LOGITS = torch.tensor([1069.0000, 228.7500, 1072.0000, 1072.0000, 1069.0000, 1068.0000, 1068.0000, 1071.0000, 1071.0000, 1071.0000, 1073.0000, 1070.0000, 1071.0000, 1075.0000, 1073.0000, 1075.0000, 1074.0000, 1069.0000, 1072.0000, 1071.0000, 1071.0000, 1071.0000, 1070.0000, 1069.0000, 1069.0000, 1069.0000, 1070.0000, 1075.0000, 1073.0000, 1074.0000]) # fmt: skip
input_ids = [29, 93, 303, 64, 5478, 49651, 10394, 187, 34, 12939, 875]
# alternative: tokenizer('<|im_start|>system\nA chat between')
input_ids = torch.as_tensor(input_ids)[None].to(torch_device)
outputs = model(input_ids)["logits"][:, -1][0, :30]
torch.testing.assert_close(EXPECTED_LOGITS, outputs, rtol=1e-5, atol=1e-5)
| transformers/tests/models/gpt_neox/test_modeling_gpt_neox.py/0 | {
"file_path": "transformers/tests/models/gpt_neox/test_modeling_gpt_neox.py",
"repo_id": "transformers",
"token_count": 9812
} | 578 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GraniteMoe model."""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GraniteMoeConfig, is_torch_available, set_seed
from transformers.testing_utils import (
Expectations,
require_read_token,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
GraniteMoeForCausalLM,
GraniteMoeModel,
)
from transformers.models.granitemoe.modeling_granitemoe import (
GraniteMoeRotaryEmbedding,
)
class GraniteMoeModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
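            # torch.tril over the (batch, seq) matrix of ones leaves sample i with exactly i + 1 visible tokens, emulating varying amounts of padding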
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return GraniteMoeConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = GraniteMoeModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GraniteMoeModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
GraniteMoeModel,
GraniteMoeForCausalLM,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": GraniteMoeModel,
"text-generation": GraniteMoeForCausalLM,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
fx_compatible = False
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
def setUp(self):
self.model_tester = GraniteMoeModelTester(self)
self.config_tester = ConfigTester(self, config_class=GraniteMoeConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
@parameterized.expand([("linear",), ("dynamic",)])
def test_model_rope_scaling_from_config(self, scaling_type):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
short_input = ids_tensor([1, 10], config.vocab_size)
long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
original_model = GraniteMoeModel(config)
original_model.to(torch_device)
original_model.eval()
original_short_output = original_model(short_input).last_hidden_state
original_long_output = original_model(long_input).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
config.rope_scaling = {"type": scaling_type, "factor": 10.0}
scaled_model = GraniteMoeModel(config)
scaled_model.to(torch_device)
scaled_model.eval()
scaled_short_output = scaled_model(short_input).last_hidden_state
scaled_long_output = scaled_model(long_input).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
def test_model_rope_scaling(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
scaling_factor = 10
short_input_length = 10
long_input_length = int(config.max_position_embeddings * 1.5)
# Inputs
x = torch.randn(
1, dtype=torch.float32, device=torch_device
) # used exclusively to get the dtype and the device
position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
position_ids_short = position_ids_short.unsqueeze(0)
position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
position_ids_long = position_ids_long.unsqueeze(0)
# Sanity check original RoPE
original_rope = GraniteMoeRotaryEmbedding(config=config).to(torch_device)
original_cos_short, original_sin_short = original_rope(x, position_ids_short)
original_cos_long, original_sin_long = original_rope(x, position_ids_long)
torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])
# Sanity check linear RoPE scaling
# New position "x" should match original position with index "x/scaling_factor"
config.rope_scaling = {"type": "linear", "factor": scaling_factor}
linear_scaling_rope = GraniteMoeRotaryEmbedding(config=config).to(torch_device)
linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short)
linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long)
torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
for new_position in range(0, long_input_length, scaling_factor):
original_position = int(new_position // scaling_factor)
torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])
# Sanity check Dynamic NTK RoPE scaling
# Scaling should only be observed after a long input is fed. We can observe that the frequencies increase
# with scaling_factor (or that `inv_freq` decreases)
config.rope_scaling = {"type": "dynamic", "factor": scaling_factor}
ntk_scaling_rope = GraniteMoeRotaryEmbedding(config=config).to(torch_device)
ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short)
ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long)
torch.testing.assert_close(ntk_cos_short, original_cos_short)
torch.testing.assert_close(ntk_sin_short, original_sin_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(ntk_cos_long, original_cos_long)
with self.assertRaises(AssertionError):
torch.testing.assert_close(ntk_sin_long, original_sin_long)
self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all())
# Sanity check Yarn RoPE scaling
# Scaling should be over the entire input
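        # (YaRN rewrites the rotary frequencies when the embedding is created, so even short inputs no longer match the original RoPE)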
config.rope_scaling = {"type": "yarn", "factor": scaling_factor}
yarn_scaling_rope = GraniteMoeRotaryEmbedding(config=config).to(torch_device)
yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short)
yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long)
torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :])
torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :])
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_cos_short, original_cos_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_sin_short, original_sin_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_cos_long, original_cos_long)
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_sin_long, original_sin_long)
@require_torch_accelerator
class GraniteMoeIntegrationTest(unittest.TestCase):
@slow
@require_read_token
def test_model_3b_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = GraniteMoeForCausalLM.from_pretrained("ibm/PowerMoE-3b", device_map="auto")
with torch.no_grad():
out = model(torch.tensor([input_ids]).to(torch_device))
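        # logits vary slightly across accelerators, so expected values are keyed by device type / capability and resolved via get_expectation()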
# fmt: off
# Expected mean on dim = -1
EXPECTED_MEANS = Expectations(
{
("xpu", 3): torch.tensor([[-4.4005, -3.6689, -3.6187, -2.8308, -3.9871, -3.1001, -2.8738, -2.8063]]),
("cuda", 7): torch.tensor([[-2.2122, -1.6632, -2.9269, -2.3344, -2.0143, -3.0146, -2.6839, -2.5610]]),
("cuda", 8): torch.tensor([[-4.4005, -3.6689, -3.6187, -2.8308, -3.9871, -3.1001, -2.8738, -2.8063]]),
}
)
EXPECTED_MEAN = EXPECTED_MEANS.get_expectation()
torch.testing.assert_close(EXPECTED_MEAN.to(torch_device), out.logits.float().mean(-1), rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:15]
EXPECTED_SLICES = Expectations(
{
("xpu", 3): torch.tensor([[2.5479, -9.2123, -9.2121, -9.2175, -9.2122, -1.5024, -9.2121, -9.2122, -9.2161, -9.2122, -6.3100, -3.6223, -3.6377, -5.2542, -5.2523]]),
("cuda", 7): torch.tensor([[4.8785, -2.2890, -2.2892, -2.2885, -2.2890, -3.5007, -2.2897, -2.2892, -2.2895, -2.2891, -2.2887, -2.2882, -2.2889, -2.2898, -2.2892]]),
("cuda", 8): torch.tensor([[2.5479, -9.2124, -9.2121, -9.2175, -9.2122, -1.5024, -9.2121, -9.2122, -9.2162, -9.2122, -6.3101, -3.6224, -3.6377, -5.2542, -5.2524]]),
}
)
EXPECTED_SLICE = EXPECTED_SLICES.get_expectation()
# fmt: on
self.assertTrue(
torch.allclose(
EXPECTED_SLICE.to(torch_device),
out.logits[0, 0, :15].float(),
atol=1e-3,
rtol=1e-3,
)
)
@slow
def test_model_3b_generation(self):
        # ground truth text for greedy decoding (do_sample=False below)
# fmt: off
EXPECTED_TEXT_COMPLETIONS = Expectations(
{
("xpu", 3): (
"Simply put, the theory of relativity states that 1) the speed of light is constant, and 2) the speed of light is the same for all observers.\n\n"
"The first part is easy to understand. The second part is a little more difficult.\n\n"
"The second part of the theory of relativity is a little more difficult to understand.\n"
),
("cuda", 7): (
"Simply put, the theory of relativity states that \n$$\n\\frac{d^2x^\\mu}{d\\tau^2} = "
"\\frac{1}{c^2}\\frac{d^2x^\\mu}{dt^2}\n$$\nwhere $x^\\mu$ is a four-vector, $\\tau$ is the proper time"
),
("cuda", 8): (
"Simply put, the theory of relativity states that 1) the speed of light is constant, and 2) the speed of light is the same for all observers.\n\n"
"The first part is easy to understand. The second part is a little more difficult.\n\n"
"The second part of the theory of relativity is a little more difficult to understand.\n"
),
}
)
# fmt: on
EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation()
prompt = "Simply put, the theory of relativity states that "
tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")
model = GraniteMoeForCausalLM.from_pretrained("ibm/PowerMoE-3b", device_map="auto")
model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# greedy generation outputs
generated_ids = model.generate(**model_inputs, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| transformers/tests/models/granitemoe/test_modeling_granitemoe.py/0 | {
"file_path": "transformers/tests/models/granitemoe/test_modeling_granitemoe.py",
"repo_id": "transformers",
"token_count": 7820
} | 579 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from torch import nn
from transformers import HGNetV2Config
from transformers.testing_utils import require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
from transformers import HGNetV2Backbone, HGNetV2ForImageClassification
class HGNetV2ModelTester:
def __init__(
self,
parent,
batch_size=3,
image_size=32,
num_channels=3,
embeddings_size=10,
hidden_sizes=[64, 128, 256, 512],
stage_in_channels=[16, 64, 128, 256],
stage_mid_channels=[16, 32, 64, 128],
stage_out_channels=[64, 128, 256, 512],
stage_num_blocks=[1, 1, 2, 1],
stage_downsample=[False, True, True, True],
stage_light_block=[False, False, True, True],
stage_kernel_size=[3, 3, 5, 5],
stage_numb_of_layers=[3, 3, 3, 3],
stem_channels=[3, 16, 16],
depths=[1, 1, 2, 1],
is_training=True,
use_labels=True,
hidden_act="relu",
num_labels=3,
scope=None,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.embeddings_size = embeddings_size
self.hidden_sizes = hidden_sizes
self.stage_in_channels = stage_in_channels
self.stage_mid_channels = stage_mid_channels
self.stage_out_channels = stage_out_channels
self.stage_num_blocks = stage_num_blocks
self.stage_downsample = stage_downsample
self.stage_light_block = stage_light_block
self.stage_kernel_size = stage_kernel_size
self.stage_numb_of_layers = stage_numb_of_layers
self.stem_channels = stem_channels
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.num_labels = num_labels
self.scope = scope
self.num_stages = len(hidden_sizes)
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return HGNetV2Config(
num_channels=self.num_channels,
embeddings_size=self.embeddings_size,
hidden_sizes=self.hidden_sizes,
stage_in_channels=self.stage_in_channels,
stage_mid_channels=self.stage_mid_channels,
stage_out_channels=self.stage_out_channels,
stage_num_blocks=self.stage_num_blocks,
stage_downsample=self.stage_downsample,
stage_light_block=self.stage_light_block,
stage_kernel_size=self.stage_kernel_size,
stage_numb_of_layers=self.stage_numb_of_layers,
stem_channels=self.stem_channels,
depths=self.depths,
hidden_act=self.hidden_act,
num_labels=self.num_labels,
out_features=self.out_features,
out_indices=self.out_indices,
)
def create_and_check_backbone(self, config, pixel_values, labels):
model = HGNetV2Backbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
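        # with 32x32 inputs the stem downsamples by 4 and stage2 by another 2, giving the 4x4 maps checked below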
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
# verify backbone works with out_features=None
config.out_features = None
model = HGNetV2Backbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = HGNetV2ForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class HGNetV2BackboneTest(BackboneTesterMixin, unittest.TestCase):
all_model_classes = (HGNetV2Backbone,) if is_torch_available() else ()
has_attentions = False
config_class = HGNetV2Config
def setUp(self):
self.model_tester = HGNetV2ModelTester(self)
@require_torch
class HGNetV2ForImageClassificationTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
    Here we also overwrite some tests of test_modeling_common.py, as HGNetV2 does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (HGNetV2ForImageClassification, HGNetV2Backbone) if is_torch_available() else ()
pipeline_model_mapping = {"image-classification": HGNetV2ForImageClassification} if is_torch_available() else {}
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_torch_exportable = True
has_attentions = False
def setUp(self):
self.model_tester = HGNetV2ModelTester(self)
@unittest.skip(reason="Does not work on the tiny model.")
def test_model_parallelism(self):
super().test_model_parallelism()
@unittest.skip(reason="HGNetV2 does not output attentions")
def test_attention_outputs(self):
pass
@unittest.skip(reason="HGNetV2 does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="HGNetV2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="HGNetV2 does not support input and output embeddings")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="HGNetV2 does not have a model")
def test_model(self):
pass
@unittest.skip(reason="Not relevant for the model")
def test_can_init_all_missing_weights(self):
pass
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config=config)
for name, module in model.named_modules():
if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1),
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
self.assertTrue(
torch.all(module.bias == 0),
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
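            # the stem output is returned as well, hence num_stages + 1 hidden states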
self.assertEqual(len(hidden_states), self.model_tester.num_stages + 1)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Retain_grad is not supposed to be tested")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="TextNet does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="HGNetV2 does not use model")
def test_model_from_pretrained(self):
pass
| transformers/tests/models/hgnet_v2/test_modeling_hgnet_v2.py/0 | {
"file_path": "transformers/tests/models/hgnet_v2/test_modeling_hgnet_v2.py",
"repo_id": "transformers",
"token_count": 4725
} | 580 |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin
if is_vision_available():
from PIL import Image
from transformers import Idefics2ImageProcessor
if is_torchvision_available():
from transformers import Idefics2ImageProcessorFast
if is_torch_available():
import torch
class Idefics2ImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
num_images=1,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
do_convert_rgb=True,
do_pad=True,
do_image_splitting=True,
):
size = size if size is not None else {"shortest_edge": 378, "longest_edge": 980}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.num_images = num_images
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_convert_rgb = do_convert_rgb
self.do_pad = do_pad
self.do_image_splitting = do_image_splitting
def prepare_image_processor_dict(self):
return {
"do_convert_rgb": self.do_convert_rgb,
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
"do_image_splitting": self.do_image_splitting,
}
def get_expected_values(self, image_inputs, batched=False):
if not batched:
shortest_edge = self.size["shortest_edge"]
longest_edge = self.size["longest_edge"]
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
aspect_ratio = w / h
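            # cap the longest side at longest_edge while preserving aspect ratio, then make sure neither side falls below shortest_edge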
if w > h and w >= longest_edge:
w = longest_edge
h = int(w / aspect_ratio)
elif h > w and h >= longest_edge:
h = longest_edge
w = int(h * aspect_ratio)
w = max(w, shortest_edge)
h = max(h, shortest_edge)
expected_height = h
expected_width = w
else:
expected_values = []
for images in image_inputs:
for image in images:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
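        # image splitting turns each input image into 4 crops plus the original image, hence the factor of 5 below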
effective_nb_images = self.num_images * 5 if self.do_image_splitting else 1
return effective_nb_images, self.num_channels, height, width
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
batch_size = batch_size if batch_size is not None else self.batch_size
min_resolution = min_resolution if min_resolution is not None else self.min_resolution
max_resolution = max_resolution if max_resolution is not None else self.max_resolution
num_channels = num_channels if num_channels is not None else self.num_channels
num_images = num_images if num_images is not None else self.num_images
images_list = []
for i in range(batch_size):
images = []
for j in range(num_images):
if equal_resolution:
width = height = max_resolution
else:
if size_divisor is not None:
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
images_list.append(images)
if not numpify and not torchify:
images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
if torchify:
images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
if numpify:
images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]
return images_list
@require_torch
@require_vision
class Idefics2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Idefics2ImageProcessor if is_vision_available() else None
fast_image_processing_class = Idefics2ImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = Idefics2ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "do_image_splitting"))
def test_call_numpy(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for sample_images in image_inputs:
for image in sample_images:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_numpy_4_channels(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processor_dict = self.image_processor_dict
image_processor_dict["image_mean"] = [0.5, 0.5, 0.5, 0.5]
image_processor_dict["image_std"] = [0.5, 0.5, 0.5, 0.5]
image_processing = image_processing_class(**image_processor_dict)
# create random numpy tensors
self.image_processor_tester.num_channels = 4
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for sample_images in image_inputs:
for image in sample_images:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(
image_inputs[0], input_data_format="channels_last", return_tensors="pt"
).pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(
image_inputs, input_data_format="channels_last", return_tensors="pt"
).pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_pil(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for images in image_inputs:
for image in images:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_pytorch(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for images in image_inputs:
for image in images:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(self.image_processor_tester.batch_size, *expected_output_image_shape),
)
def test_image_splitting(self):
for image_processing_class in self.image_processor_list:
image_processor_dict = self.image_processor_dict.copy()
image_processor_dict["do_image_splitting"] = True
image_processing = image_processing_class(**image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(
equal_resolution=True, torchify=True, num_images=1
)
result = image_processing(image_inputs[0], return_tensors="pt")
self.assertEqual(result.pixel_values.shape[1], 5)
image_processor_dict["do_image_splitting"] = False
image_processing = image_processing_class(**image_processor_dict)
result = image_processing(image_inputs[0], return_tensors="pt")
if len(result.pixel_values.shape) == 5:
self.assertEqual(result.pixel_values.shape[1], 1)
else:
self.assertEqual(result.pixel_values.shape[1], self.image_processor_tester.num_channels)
def test_pixel_attention_mask(self):
for image_processing_class in self.image_processor_list:
image_processor_dict = self.image_processor_dict.copy()
image_processor_dict["do_pad"] = True
image_processing = image_processing_class(**image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
result = image_processing(image_inputs, return_tensors="pt")
self.assertIn("pixel_attention_mask", result)
self.assertEqual(result.pixel_attention_mask.shape[-2:], result.pixel_values.shape[-2:])
image_processor_dict["do_pad"] = False
image_processor_dict["do_image_splitting"] = False
image_processing = image_processing_class(**image_processor_dict)
equal_size_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
result = image_processing(equal_size_inputs, return_tensors="pt")
self.assertNotIn("pixel_attention_mask", result)
def test_convert_rgb(self):
for image_processing_class in self.image_processor_list:
rgba_image = Image.new("RGBA", (100, 100), (255, 0, 0, 128))
# Test with do_convert_rgb=True - this should work for all processors
image_processor_dict = self.image_processor_dict.copy()
image_processor_dict["do_convert_rgb"] = True
image_processing = image_processing_class(**image_processor_dict)
result = image_processing([rgba_image], return_tensors="pt")
self.assertIsNotNone(result.pixel_values)
rgb_image = rgba_image.convert("RGB")
image_processor_dict["do_convert_rgb"] = False
image_processing = image_processing_class(**image_processor_dict)
# Use the RGB image instead of RGBA when do_convert_rgb=False
result = image_processing([rgb_image], return_tensors="pt")
self.assertIsNotNone(result.pixel_values)
# Additional test: verifying proper handling of regular RGB images
rgb_image = Image.new("RGB", (100, 100), (255, 0, 0))
result = image_processing([rgb_image], return_tensors="pt")
self.assertIsNotNone(result.pixel_values)
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, num_images=5, torchify=True
)
        # pop some images to get non-homogeneous batches:
indices_to_pop = [i if np.random.random() < 0.5 else None for i in range(len(dummy_images))]
for i in indices_to_pop:
if i is not None:
dummy_images[i].pop()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(
encoding_slow.pixel_attention_mask.float(), encoding_fast.pixel_attention_mask.float()
)
| transformers/tests/models/idefics2/test_image_processing_idefics2.py/0 | {
"file_path": "transformers/tests/models/idefics2/test_image_processing_idefics2.py",
"repo_id": "transformers",
"token_count": 7984
} | 581 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = InstructBlipProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = BlipImageProcessor()
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
processor.save_pretrained(cls.tmpdirname)
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_qformer_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
def prepare_processor_dict(self):
return {"num_query_tokens": 1}
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def test_save_load_pretrained_additional_features(self):
processor = InstructBlipProcessor(
tokenizer=self.get_tokenizer(),
image_processor=self.get_image_processor(),
qformer_tokenizer=self.get_qformer_tokenizer(),
)
with tempfile.TemporaryDirectory() as tmpdir:
processor.save_pretrained(tmpdir)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
processor = InstructBlipProcessor.from_pretrained(
tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, BlipImageProcessor)
self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
qformer_tokenizer = self.get_qformer_tokenizer()
processor_kwargs = self.prepare_processor_dict()
processor = InstructBlipProcessor(
tokenizer=tokenizer,
image_processor=image_processor,
qformer_tokenizer=qformer_tokenizer,
**processor_kwargs,
)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
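        # The processor is expected to simply delegate image preprocessing to the wrapped image
        # processor, so both outputs should agree up to numerical noise.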
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
qformer_tokenizer = self.get_qformer_tokenizer()
processor_kwargs = self.prepare_processor_dict()
processor = InstructBlipProcessor(
tokenizer=tokenizer,
image_processor=image_processor,
qformer_tokenizer=qformer_tokenizer,
**processor_kwargs,
)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(
list(inputs.keys()),
["qformer_input_ids", "qformer_attention_mask", "input_ids", "attention_mask", "pixel_values"],
)
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
qformer_tokenizer = self.get_qformer_tokenizer()
processor_kwargs = self.prepare_processor_dict()
processor = InstructBlipProcessor(
tokenizer=tokenizer,
image_processor=image_processor,
qformer_tokenizer=qformer_tokenizer,
**processor_kwargs,
)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
| transformers/tests/models/instructblip/test_processing_instructblip.py/0 | {
"file_path": "transformers/tests/models/instructblip/test_processing_instructblip.py",
"repo_id": "transformers",
"token_count": 2340
} | 582 |
# Copyright 2024 JetMoe AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch JetMoe model."""
import gc
import unittest
import pytest
from transformers import AutoTokenizer, JetMoeConfig, is_torch_available
from transformers.testing_utils import (
backend_empty_cache,
require_flash_attn,
require_torch,
require_torch_gpu,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
JetMoeForCausalLM,
JetMoeForSequenceClassification,
JetMoeModel,
)
class JetMoeModelTester(CausalLMModelTester):
config_class = JetMoeConfig
forced_config_args = ["pad_token_id"]
if is_torch_available():
base_model_class = JetMoeModel
causal_lm_class = JetMoeForCausalLM
sequence_class = JetMoeForSequenceClassification
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_key_value_heads=2,
kv_channels=8,
intermediate_size=37,
hidden_act="silu",
num_local_experts=4,
num_experts_per_tok=2,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
scope=None,
):
super().__init__(parent)
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.kv_channels = kv_channels
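        # JetMoe routes attention through a mixture of attention-head "experts": each token uses
        # num_experts_per_tok experts, so the effective head count is num_key_value_heads * num_experts_per_tok.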
self.num_attention_heads = num_key_value_heads * num_experts_per_tok
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_local_experts = num_local_experts
self.num_experts_per_tok = num_experts_per_tok
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.scope = scope
@require_torch
class JetMoeModelTest(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(JetMoeModel, JetMoeForCausalLM, JetMoeForSequenceClassification) if is_torch_available() else ()
)
test_headmasking = False
test_pruning = False
test_mismatched_shapes = False
test_cpu_offload = False
test_disk_offload_bin = False
test_disk_offload_safetensors = False
model_tester_class = JetMoeModelTester
pipeline_model_mapping = (
{
"feature-extraction": JetMoeModel,
"text-classification": JetMoeForSequenceClassification,
"text-generation": JetMoeForCausalLM,
}
if is_torch_available()
else {}
)
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="JetMoe flash attention does not support right padding")
@require_torch
class JetMoeIntegrationTest(unittest.TestCase):
@slow
def test_model_8b_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = JetMoeForCausalLM.from_pretrained("jetmoe/jetmoe-8b")
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
with torch.no_grad():
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[0.2507, -2.7073, -1.3445, -1.9363, -1.7216, -1.7370, -1.9054, -1.9792]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-3.3689, 5.9006, 5.7450, -1.7012, -4.7072, -4.7071, -4.7071, -4.7071, -4.7072, -4.7072, -4.7072, -4.7071, 3.8321, 9.1746, -4.7071, -4.7072, -4.7071, -4.7072, -4.7071, -4.7072, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071]) # fmt: skip
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
del model
backend_empty_cache(torch_device)
gc.collect()
@slow
def test_model_8b_generation(self):
EXPECTED_TEXT_COMPLETION = """My favourite condiment is ....\nI love ketchup. I love"""
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("jetmoe/jetmoe-8b", use_fast=False)
model = JetMoeForCausalLM.from_pretrained("jetmoe/jetmoe-8b")
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=10, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
del model
backend_empty_cache(torch_device)
gc.collect()
@slow
def test_model_8b_batched_generation(self):
EXPECTED_TEXT_COMPLETION = [
"""My favourite condiment is ....\nI love ketchup. I love""",
"""My favourite 2018 Christmas present was a new pair""",
]
prompt = [
"My favourite condiment is ",
"My favourite ",
]
tokenizer = AutoTokenizer.from_pretrained("jetmoe/jetmoe-8b", use_fast=False)
model = JetMoeForCausalLM.from_pretrained("jetmoe/jetmoe-8b")
input_ids = tokenizer(prompt, return_tensors="pt", padding=True).to(model.model.embed_tokens.weight.device)
print(input_ids)
# greedy generation outputs
generated_ids = model.generate(**input_ids, max_new_tokens=10, temperature=0)
text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
print(text)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
del model
backend_empty_cache(torch_device)
gc.collect()
| transformers/tests/models/jetmoe/test_modeling_jetmoe.py/0 | {
"file_path": "transformers/tests/models/jetmoe/test_modeling_jetmoe.py",
"repo_id": "transformers",
"token_count": 3273
} | 583 |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch MaskFormer Swin model."""
import collections
import unittest
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
patch_size=2,
num_channels=3,
embed_dim=16,
depths=[1, 2, 1],
num_heads=[2, 2, 4],
window_size=2,
mlp_ratio=2.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
patch_norm=True,
initializer_range=0.02,
layer_norm_eps=1e-5,
is_training=True,
scope=None,
use_labels=True,
type_sequence_label_size=10,
encoder_stride=8,
out_features=["stage1", "stage2", "stage3"],
out_indices=[1, 2, 3],
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return MaskFormerSwinConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
embed_dim=self.embed_dim,
depths=self.depths,
num_heads=self.num_heads,
window_size=self.window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
drop_path_rate=self.drop_path_rate,
hidden_act=self.hidden_act,
use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
layer_norm_eps=self.layer_norm_eps,
initializer_range=self.initializer_range,
encoder_stride=self.encoder_stride,
out_features=self.out_features,
out_indices=self.out_indices,
)
def create_and_check_model(self, config, pixel_values, labels):
model = MaskFormerSwinModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
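        # Each Swin downsampling stage merges 2x2 patches, shrinking the token count by 4 and doubling
        # the channel dimension, hence the expected sequence length and hidden size computed below.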
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
def create_and_check_backbone(self, config, pixel_values, labels):
model = MaskFormerSwinBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
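        # First feature map has shape (batch_size, embed_dim, image_size // patch_size, image_size // patch_size),
        # i.e. (13, 16, 16, 16) with the tester defaults above.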
self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, [16, 32, 64])
# verify ValueError
with self.parent.assertRaises(ValueError):
config.out_features = ["stem"]
model = MaskFormerSwinBackbone(config=config)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
fx_compatible = False
test_torchscript = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_torch_exportable = True
def setUp(self):
self.model_tester = MaskFormerSwinModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=MaskFormerSwinConfig,
has_text_modality=False,
embed_dim=37,
common_properties=["image_size", "patch_size", "num_channels"],
)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
)
)
def test_multi_gpu_data_parallel_forward(self):
pass
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip(reason="Swin does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Swin does not support feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def test_attention_outputs(self):
pass
def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# Swin has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[num_patches, self.model_tester.embed_dim],
)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
def test_hidden_states_output_with_padding(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
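        # An odd patch size means the 32x32 inputs no longer divide evenly, so the padding path is exercised.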
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def test_model_from_pretrained(self):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def test_initialization(self):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def test_gradient_checkpointing_backward_compatibility(self):
pass
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (list, tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
config_class = MaskFormerSwinConfig
def setUp(self):
self.model_tester = MaskFormerSwinModelTester(self)
# Overriding as returned hidden states are tuples of tensors instead of a single tensor
def test_backbone_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
backbone = backbone_class(config)
backbone.to(torch_device)
backbone.eval()
outputs = backbone(**inputs_dict)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, tuple)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
outputs = backbone(**inputs_dict, output_hidden_states=True)
self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
outputs = backbone(**inputs_dict, output_attentions=True)
self.assertIsNotNone(outputs.attentions)
| transformers/tests/models/maskformer/test_modeling_maskformer_swin.py/0 | {
"file_path": "transformers/tests/models/maskformer/test_modeling_maskformer_swin.py",
"repo_id": "transformers",
"token_count": 7893
} | 584 |
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
import numpy as np
from transformers import MllamaProcessor
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from PIL import Image
@require_torch
@require_vision
class MllamaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = MllamaProcessor
@classmethod
def setUpClass(cls):
cls.checkpoint = "hf-internal-testing/mllama-11b"
processor = MllamaProcessor.from_pretrained(cls.checkpoint)
cls.image1 = Image.new("RGB", (224, 220))
cls.image2 = Image.new("RGB", (512, 128))
cls.image_token = processor.image_token
cls.image_token_id = processor.image_token_id
cls.pad_token_id = processor.tokenizer.pad_token_id
cls.bos_token = processor.bos_token
cls.bos_token_id = processor.tokenizer.bos_token_id
cls.tmpdirname = tempfile.mkdtemp()
processor.save_pretrained(cls.tmpdirname)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def prepare_processor_dict(self):
return {"chat_template": "{% for message in messages %}{% if loop.index0 == 0 %}{{ bos_token }}{% endif %}{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }}{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' %}{{ '<|image|>' }}{% elif content['type'] == 'text' %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{ '<|eot_id|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"} # fmt: skip
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded)
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_apply_chat_template(self):
# Message contains content which a mix of lists with images and image urls and string
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "image"},
{"type": "text", "text": "What do these images show?"},
],
},
{
"role": "assistant",
"content": [
{"type": "text", "text": "The first image shows the statue of Liberty in New York."},
],
},
{
"role": "user",
"content": [
{"type": "text", "text": "And who is that?"},
],
},
]
processor = MllamaProcessor.from_pretrained(self.tmpdirname)
rendered = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
expected_rendered = (
"<|begin_of_text|>"
"<|start_header_id|>user<|end_header_id|>\n\n"
"<|image|><|image|>What do these images show?"
"<|eot_id|>"
"<|start_header_id|>assistant<|end_header_id|>\n\n"
"The first image shows the statue of Liberty in New York."
"<|eot_id|>"
"<|start_header_id|>user<|end_header_id|>\n\n"
"And who is that?"
"<|eot_id|>"
"<|start_header_id|>assistant<|end_header_id|>\n\n"
)
self.assertEqual(rendered, expected_rendered)
messages = [
{
"role": "system",
"content": [
{"type": "text", "text": "This is a test sentence."},
],
},
{
"role": "user",
"content": [
{"type": "text", "text": "This is a response."},
],
},
]
input_ids = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
expected_ids = [
[
128000, # <|begin_of_text|>
128006, # <|start_header_id|>
9125, # "system"
                128007, # <|end_header_id|>
271, # "\n\n"
2028,
374,
264,
1296,
11914,
13, # "This is a test sentence."
128009, # <|eot_id|>
128006, # <|start_header_id|>
882, # "user"
                128007, # <|end_header_id|>
271, # "\n\n"
2028,
374,
264,
2077,
13, # "This is a response.",
128009, # <|eot_id|>
128006, # <|start_header_id|>
78191, # "assistant"
                128007, # <|end_header_id|>
271, # "\n\n"
]
]
self.assertEqual(input_ids, expected_ids)
# test image in multiple locations
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image in two sentences"},
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
{"type": "text", "text": " Test sentence "},
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
{"type": "text", "text": "ok\n"},
],
}
]
rendered = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
expected_rendered = (
"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
"Describe this image in two sentences<|image|> Test sentence <|image|>ok\n<|eot_id|>"
"<|start_header_id|>assistant<|end_header_id|>\n\n"
)
self.assertEqual(rendered, expected_rendered)
input_ids = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
# fmt: off
expected_ids = [[
128000, 128006, 882, 128007, 271, 75885, 420, 2217, 304, 1403, 23719, 128256,
3475, 11914, 262, 128256, 564, 198, 128009, 128006, 78191, 128007, 271,
]]
# fmt: on
self.assertEqual(input_ids, expected_ids)
# text format for content
messages_list = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "Describe this image in two sentences"},
],
}
]
messages_str = [
{
"role": "user",
"content": "<|image|>Describe this image in two sentences",
}
]
rendered_list = processor.apply_chat_template(messages_list, add_generation_prompt=True, tokenize=False)
rendered_str = processor.apply_chat_template(messages_str, add_generation_prompt=True, tokenize=False)
self.assertEqual(rendered_list, rendered_str)
def test_process_interleaved_images_prompts_image_splitting(self):
processor = MllamaProcessor.from_pretrained(self.tmpdirname)
# Test that a single image is processed correctly
inputs = processor(images=self.image2, size={"width": 224, "height": 224})
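        # pixel_values are returned as (batch_size, max_num_images, max_image_tiles, channels, tile_height,
        # tile_width), with unused image/tile slots zero-padded.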
self.assertEqual(inputs["pixel_values"].shape, (1, 1, 4, 3, 224, 224))
# Test that text is processed correctly
text = "<|begin_of_text|>This is a test sentence.<|end_of_text|>"
inputs = processor(text=text)
expected_ids = [128000, 2028, 374, 264, 1296, 11914, 13, 128001]
self.assertEqual(inputs["input_ids"][0], expected_ids)
self.assertEqual(inputs["attention_mask"][0], [1] * len(expected_ids))
self.assertEqual(inputs.get("cross_attention_mask"), None)
# Test a single sample with image and text
image_str = "<|image|>"
text_str = "This is a test sentence."
text = image_str + text_str
inputs = processor(
text=text,
images=self.image1,
size={"width": 128, "height": 128},
)
expected_ids = [self.image_token_id, self.bos_token_id] + [2028, 374, 264, 1296, 11914, 13]
self.assertEqual(inputs["pixel_values"].shape, (1, 1, 4, 3, 128, 128))
self.assertEqual(inputs["input_ids"][0], expected_ids)
self.assertEqual(inputs["attention_mask"][0], [1] * len(expected_ids))
cross_attention_mask = inputs["cross_attention_mask"]
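        # cross_attention_mask has shape (batch_size, text_seq_len, max_num_images, max_image_tiles):
        # here all 8 text tokens may attend to the single image and its (up to) 4 tiles.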
self.assertEqual(cross_attention_mask.shape, (1, 8, 1, 4))
self.assertTrue(
np.all(cross_attention_mask == 1), f"Cross attention mask is not all ones: {cross_attention_mask}"
)
# Test batch
text = [
"<|image|>This is a test sentence.",
"This is a test sentence.<|image|><|image|>This is a test sentence.",
]
# fmt: off
expected_ids = [
[self.image_token_id, self.bos_token_id, 2028, 374, 264, 1296, 11914, 13],
[self.bos_token_id, 2028, 374, 264, 1296, 11914, 13, self.image_token_id, self.image_token_id, 2028, 374, 264, 1296, 11914, 13],
]
        # fmt: on
images = [[self.image1], [self.image1, self.image2]]
inputs = processor(text=text, images=images, padding=True, size={"width": 256, "height": 256})
self.assertEqual(inputs["pixel_values"].shape, (2, 2, 4, 3, 256, 256))
for input_ids_i, attention_mask_i, expected_ids_i in zip(inputs["input_ids"], inputs["attention_mask"], expected_ids):
pad_ids = [id for id, m in zip(input_ids_i, attention_mask_i) if m == 0]
input_ids = [id for id, m in zip(input_ids_i, attention_mask_i) if m == 1]
self.assertEqual(input_ids, expected_ids_i)
self.assertEqual(pad_ids, [self.pad_token_id] * len(pad_ids))
cross_attention_mask = inputs["cross_attention_mask"]
self.assertEqual(cross_attention_mask.shape, (2, 15, 2, 4))
# Check that only first tile of first sample is attended to all text tokens
first_sample_mask = cross_attention_mask[0].copy()
first_image_first_tile_attention = first_sample_mask[:, :1, :1] # text tokens, images, tiles
self.assertTrue(np.all(first_image_first_tile_attention == 1), f"Cross attention mask is not all ones: {first_image_first_tile_attention}")
# zero out first tile of first image
first_image_first_tile_attention[:, :1, :1] = 0
self.assertTrue(np.all(first_image_first_tile_attention == 0), f"Cross attention mask is not all zeros: {first_image_first_tile_attention}")
# second sample
second_sample_mask = cross_attention_mask[1].copy()
first_image_first_tile_attention = second_sample_mask[7:, :1, :1] # text tokens, images, tiles
self.assertTrue(np.all(first_image_first_tile_attention == 1), f"Cross attention mask is not all ones: {first_image_first_tile_attention}")
second_image_two_tiles_attention = second_sample_mask[8:, 1:2, :2] # text tokens, images, tiles
self.assertTrue(np.all(second_image_two_tiles_attention == 1), f"Cross attention mask is not all ones: {second_image_two_tiles_attention}")
# zero out both images masks
second_sample_mask[7:, :1, :1] = 0
second_sample_mask[8:, 1:2, :2] = 0
self.assertTrue(np.all(second_sample_mask == 0), f"Cross attention mask is not all zeros: {second_sample_mask}")
def test_process_interleaved_images_prompts_image_error(self):
text = [
"This is a test sentence.",
"In this other sentence we try some good things",
]
processor = MllamaProcessor.from_pretrained(self.tmpdirname)
inputs = processor(text=text, images=None, padding=True)
self.assertIsNotNone(inputs["input_ids"])
text = [
"This is a test sentence.<|image|>",
"In this other sentence we try some good things",
]
with self.assertRaises(ValueError):
processor(text=text, images=None, padding=True)
images = [[self.image1], []]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
text = [
"This is a test sentence.<|image|>",
"In this other sentence we try some good things<|image|>",
]
with self.assertRaises(ValueError):
processor(text=text, images=None, padding=True)
text = [
"This is a test sentence.<|image|>",
"In this other sentence we try some good things<|image|>",
]
images = [[self.image1], [self.image2]]
inputs = processor(text=text, images=images, padding=True)
images = [[self.image1, self.image2], []]
with self.assertRaises(ValueError):
processor(text=text, images=None, padding=True)
# see https://github.com/huggingface/transformers/pull/35934
images = [self.image1, self.image2]
with self.assertRaises(ValueError):
processor(text=text, images=None, padding=True)
def test_unstructured_kwargs_batched(self):
# Overridden because Mllama expects images in nested format. For 2 images it can't infer
# the correct nesting, so we better throw an error
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
image_input = [[image_input[0]], [image_input[1]]]
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1,
padding="longest",
max_length=76,
)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
self.assertTrue(
len(inputs[self.text_input_name][0]) == len(inputs[self.text_input_name][1])
and len(inputs[self.text_input_name][1]) < 76
)
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
image_input = [[image_input[0]], [image_input[1]]]
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=3,
)
| transformers/tests/models/mllama/test_processing_mllama.py/0 | {
"file_path": "transformers/tests/models/mllama/test_processing_mllama.py",
"repo_id": "transformers",
"token_count": 7781
} | 585 |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch MobileViT model."""
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "hidden_sizes"))
self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
patch_size=2,
num_channels=3,
last_hidden_size=32,
num_attention_heads=4,
hidden_act="silu",
conv_kernel_size=3,
output_stride=32,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
classifier_dropout_prob=0.1,
initializer_range=0.02,
is_training=True,
use_labels=True,
num_labels=10,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.last_hidden_size = last_hidden_size
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.conv_kernel_size = conv_kernel_size
self.output_stride = output_stride
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.use_labels = use_labels
self.is_training = is_training
self.num_labels = num_labels
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
pixel_labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels, pixel_labels
def get_config(self):
return MobileViTConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
num_attention_heads=self.num_attention_heads,
hidden_act=self.hidden_act,
conv_kernel_size=self.conv_kernel_size,
output_stride=self.output_stride,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
classifier_dropout_prob=self.classifier_dropout_prob,
initializer_range=self.initializer_range,
hidden_sizes=[12, 16, 20],
neck_hidden_sizes=[8, 8, 16, 16, 32, 32, 32],
)
def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
model = MobileViTModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape,
(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
config.num_labels = self.num_labels
model = MobileViTForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
config.num_labels = self.num_labels
model = MobileViTForSemanticSegmentation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape,
(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
),
)
result = model(pixel_values, labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape,
(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels, pixel_labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
test_torch_exportable = True
def setUp(self):
self.model_tester = MobileViTModelTester(self)
self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="MobileViT does not output attentions")
def test_attention_outputs(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_stages = 5
self.assertEqual(len(hidden_states), expected_num_stages)
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
divisor = 2
for i in range(len(hidden_states)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]),
[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
)
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
def test_for_semantic_segmentation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "apple/mobilevit-small"
model = MobileViTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [-1.9364, -1.2327, -0.4653],
("cuda", 8): [-1.9364, -1.2327, -0.4653],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_inference_semantic_segmentation(self):
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
model = model.to(torch_device)
image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
],
("cuda", 8): [
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4229, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6739], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_post_processing_semantic_segmentation(self):
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
model = model.to(torch_device)
image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
expected_shape = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape, expected_shape)
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
expected_shape = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape, expected_shape)
| transformers/tests/models/mobilevit/test_modeling_mobilevit.py/0 | {
"file_path": "transformers/tests/models/mobilevit/test_modeling_mobilevit.py",
"repo_id": "transformers",
"token_count": 6440
} | 586 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import unittest
from transformers import MptConfig, is_torch_available
from transformers.testing_utils import (
Expectations,
require_bitsandbytes,
require_deterministic_for_xpu,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
AutoTokenizer,
MptForCausalLM,
MptForQuestionAnswering,
MptForSequenceClassification,
MptForTokenClassification,
MptModel,
)
@require_torch
class MptModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=False,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=48,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_dropout_prob = attention_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def get_large_model_config(self):
return MptConfig.from_pretrained("mosaicml/mpt-7b")
def prepare_config_and_inputs(self, gradient_checkpointing=False):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config(gradient_checkpointing=gradient_checkpointing)
return (config, input_ids, input_mask, sequence_labels)
def get_config(self, gradient_checkpointing=False):
return MptConfig(
vocab_size=self.vocab_size,
seq_length=self.seq_length,
hidden_size=self.hidden_size,
n_layers=self.num_hidden_layers,
n_heads=self.num_attention_heads,
hidden_dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_dropout_prob,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
num_labels=self.num_labels,
gradient_checkpointing=gradient_checkpointing,
dtype="float32",
)
def create_and_check_mpt_model(self, config, input_ids, input_mask, *args):
model = MptModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layers)
def create_and_check_mpt_model_past(self, config, input_ids, input_mask, *args):
model = MptModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True)
outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids))
outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids))
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_mpt_model_attention_mask_past(self, config, input_ids, input_mask, *args):
model = MptModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_mpt_model_past_large_inputs(self, config, input_ids, input_mask, *args):
model = MptModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
output_hidden_states=True,
)
hidden_states_from_no_past = output_from_no_past["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)
hidden_states_from_past = output_from_past["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), hidden_states_from_past.shape[-1]).item()
output_from_no_past_slice = hidden_states_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = hidden_states_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):
model = MptForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args):
config.num_labels = self.num_labels
model = MptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args):
model = MptForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, *args, gradient_checkpointing=False
):
model = MptForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
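    # Verifies the MPT initialization scheme: "c_proj" weight matrices should have a standard
    # deviation close to initializer_range / sqrt(2 * n_layers) and a mean close to zero.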
def create_and_check_mpt_weight_initialization(self, config, *args):
model = MptModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layers)
for key in model.state_dict():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, sequence_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
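# Config tester with an MPT-specific check: `attn_config` passed as a plain dict must be converted
# into an attribute-accessible sub-config.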
class MptConfigTester(ConfigTester):
def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
super().__init__(parent, config_class, has_text_modality, common_properties, **kwargs)
def test_attn_config_as_dict(self):
config = self.config_class(**self.inputs_dict, attn_config={"attn_impl": "flash", "softmax_scale": None})
self.parent.assertTrue(config.attn_config.attn_impl == "flash")
self.parent.assertTrue(config.attn_config.softmax_scale is None)
def run_common_tests(self):
self.test_attn_config_as_dict()
return super().run_common_tests()
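# Common model/generation/pipeline tests for MPT, driven by the testers defined above.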
@require_torch
class MptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
MptModel,
MptForCausalLM,
MptForSequenceClassification,
MptForTokenClassification,
MptForQuestionAnswering,
)
if is_torch_available()
else ()
)
fx_compatible = False
test_missing_keys = False
test_pruning = False
test_torchscript = False
test_head_masking = False
pipeline_model_mapping = (
{
"feature-extraction": MptModel,
"question-answering": MptForQuestionAnswering,
"text-classification": MptForSequenceClassification,
"text-generation": MptForCausalLM,
"token-classification": MptForTokenClassification,
"zero-shot": MptForSequenceClassification,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = MptModelTester(self)
self.config_tester = MptConfigTester(self, config_class=MptConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_mpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpt_model(*config_and_inputs)
def test_mpt_model_alibi_tensor(self):
# test creation of alibi tensor when num heads is not a power of two
config_and_inputs = self.model_tester.prepare_config_and_inputs()
config_and_inputs[0].n_heads = 6
self.model_tester.create_and_check_mpt_model(*config_and_inputs)
def test_mpt_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpt_model_past(*config_and_inputs)
def test_mpt_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpt_model_attention_mask_past(*config_and_inputs)
def test_mpt_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpt_model_past_large_inputs(*config_and_inputs)
def test_mpt_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_mpt_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs)
def test_mpt_token_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_token_classification_model(*config_and_inputs)
def test_mpt_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def test_mpt_weight_initialization(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpt_weight_initialization(*config_and_inputs)
@unittest.skip(reason="For backward compatibility the lm_head is not in the model's state dict on the Hub.")
def test_model_weights_reload_no_missing_tied_weights(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "mosaicml/mpt-7b"
model = MptModel.from_pretrained(model_name)
self.assertIsNotNone(model)
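# The integration tests below load the real mosaicml/mpt-7b checkpoints in 4-bit, so they need an
# accelerator plus bitsandbytes and only run in the slow CI job.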
@slow
@require_torch_accelerator
@require_bitsandbytes
class MptIntegrationTests(unittest.TestCase):
def test_generation_8k(self):
model_id = "mosaicml/mpt-7b-8k"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Load in 4bit to fit the daily CI runner GPU RAM
model = MptForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True)
input_text = "Hello"
expected_outputs = Expectations({
(None, None): "Hello, I'm a new user of the forum. I have a question about the \"Solaris",
("cuda", 8): "Hello, I'm a new user of the forum. I have a question. I have a problem with",
("rocm", (9, 5)): "Hello, I'm a newbie to the forum. I have a question about the \"B\" in",
}) # fmt: off
expected_output = expected_outputs.get_expectation()
inputs = tokenizer(input_text, return_tensors="pt").to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=20)
decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
self.assertEqual(decoded_output, expected_output)
def test_generation(self):
model_id = "mosaicml/mpt-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Load in 4bit to fit the daily CI runner GPU RAM
model = MptForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True)
input_text = "Hello"
expected_outputs = Expectations({
(None, None): "Hello and welcome to the first episode of the new podcast, The Frugal Feminist.\n",
("rocm", (9, 5)): "Hello and welcome to the first day of the new release at The Stamp Man!\nToday we are",
("xpu", 3): "Hello and welcome to the first ever episode of the new and improved, and hopefully improved, podcast.\n",
("cuda", 8): "Hello and welcome to the first ever episode of the new and improved, and hopefully improved, podcast.\n",
}) # fmt: off
expected_output = expected_outputs.get_expectation()
inputs = tokenizer(input_text, return_tensors="pt").to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=20)
decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
self.assertEqual(decoded_output, expected_output)
@require_deterministic_for_xpu
def test_generation_batched(self):
model_id = "mosaicml/mpt-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Load in 4bit to fit the daily CI runner GPU RAM
model = MptForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True)
input_texts = ["Hello my name is", "Today I am going at the gym and"]
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenizer.padding_side = "left"
inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(torch_device)
expected_outputs = Expectations(
{
(None, None): [
"Hello my name is Tiffany and I am a mother of two beautiful children. I have been a nanny for the",
"Today I am going at the gym and then I am going to go to the grocery store. I am going to buy some food and some",
],
("xpu", 3): [
"Hello my name is Tiffany. I am a mother of two beautiful children. I have been a nanny for over",
"Today I am going at the gym and then I am going to go to the mall with my mom. I am going to go to the",
],
("cuda", 8): [
"Hello my name is Tiffany and I am a mother of two beautiful children. I have been a nanny for over",
"Today I am going at the gym and then I am going to go to the grocery store. I am going to make a list of things",
],
("rocm", (9, 5)): [
"Hello my name is Jasmine and I am a very sweet and loving dog. I am a very playful dog and I",
"Today I am going at the gym and then I am going to go to the mall. I am going to buy a new pair of jeans",
],
}
)
expected_output = expected_outputs.get_expectation()
outputs = model.generate(**inputs, max_new_tokens=20)
decoded_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
for i, predicted_output in enumerate(decoded_outputs):
self.assertEqual(predicted_output, expected_output[i])
def test_model_logits(self):
model_id = "mosaicml/mpt-7b"
# Load in 4bit to fit the daily CI runner GPU RAM
model = MptForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True)
dummy_input = torch.LongTensor([[1, 2, 3, 4, 5]]).to(torch_device)
outputs = model(dummy_input, output_hidden_states=True)
expected_slices = Expectations(
{
(None, None): torch.Tensor([-0.2520, -0.2178, -0.1953]),
("xpu", 3): torch.Tensor([-0.2090, -0.2061, -0.1465]),
("cuda", 8): torch.Tensor([-0.2559, -0.2227, -0.2217]),
# TODO: This is quite a bit off, check BnB
("rocm", (9, 5)): torch.Tensor([-0.3008, -0.1309, -0.1562]),
}
)
expected_slice = expected_slices.get_expectation().to(torch_device, torch.bfloat16)
predicted_slice = outputs.hidden_states[-1][0, 0, :3]
torch.testing.assert_close(expected_slice, predicted_slice, rtol=1e-3, atol=1e-3)
| transformers/tests/models/mpt/test_modeling_mpt.py/0 | {
"file_path": "transformers/tests/models/mpt/test_modeling_mpt.py",
"repo_id": "transformers",
"token_count": 10096
} | 587 |
# Copyright 2024
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import unittest
from transformers import MyT5Tokenizer
from transformers.testing_utils import slow
from transformers.utils import is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
FRAMEWORK = "pt"
elif is_tf_available():
FRAMEWORK = "tf"
else:
FRAMEWORK = "jax"
def bytes_to_hex(bline: bytes, sep: str = " ") -> str:
return str(binascii.hexlify(bline, sep), "utf-8")
def str_to_hex(line: str, sep: str = " ") -> str:
return bytes_to_hex(bytes(line, "utf-8"), sep)
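# Unit tests for the tokenizer's byte rewriter: rewriting operates on lists of hex-encoded bytes,
# must be reversible via `reverse=True`, and should pass unrecognized byte markers through unchanged.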
class TestByteRewriter(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.tokenizer = MyT5Tokenizer.from_pretrained("Tomlim/myt5-base")
def test_simple_decompose(self):
decompose_rewriter = self.tokenizer.decompose_rewriter
# test rewriting
in_str = "Hello WorlD"
out_str = "hAello wAorldA"
in_hex = str_to_hex(in_str).split(" ")
out_hex = str_to_hex(out_str).split(" ")
self.assertEqual(decompose_rewriter.rewrite_bytes(in_hex), out_hex)
def test_simple_decompose_reversible(self):
decompose_rewriter = self.tokenizer.decompose_rewriter
in_str = "Hello WorlD"
out_str = "Hello WorlD"
in_hex = str_to_hex(in_str).split(" ")
out_hex = str_to_hex(out_str).split(" ")
self.assertEqual(
decompose_rewriter.rewrite_bytes(decompose_rewriter.rewrite_bytes(in_hex), reverse=True), out_hex
)
def test_simple_decompose_non_latin(self):
decompose_rewriter = self.tokenizer.decompose_rewriter
in_str = "你好世界 Hello WorlD"
out_str = "你好世界 hAello wAorldA"
in_hex = str_to_hex(in_str).split(" ")
out_hex = str_to_hex(out_str).split(" ")
self.assertEqual(decompose_rewriter.rewrite_bytes(in_hex), out_hex)
def test_unrecognized_byte(self):
decompose_rewriter = self.tokenizer.decompose_rewriter
in_hex = ["00", "01", "xx", "03", "61"]
out_hex = ["00", "01", "xx", "03", "61"]
self.assertEqual(decompose_rewriter.rewrite_bytes(in_hex), out_hex)
# This is way too slow, let's not run it on CircleCI. When trying to use cache, we get OOM and the worker(s) crash.
@slow
class MyT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MyT5Tokenizer
test_rust_tokenizer = False
    @classmethod
    def get_tokenizer(cls, **kwargs) -> MyT5Tokenizer:
return cls.tokenizer_class.from_pretrained("Tomlim/myt5-base", **kwargs)
@unittest.skip(reason="inputs cannot be pretokenized as ids depend on whole input string")
def test_pretokenized_inputs(self):
pass
def test_convert_tokens_to_string_format(self):
tokenizer = self.get_tokenizer()
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokens = ["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"]
string = tokenizer.convert_tokens_to_string(tokens)
self.assertIsInstance(string, str)
def test_simple_tokenize(self):
tokenizer = self.get_tokenizer()
in_str = "Hello World"
out_tokens = ["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90"]
self.assertEqual(tokenizer.tokenize(in_str), out_tokens)
in_pl_str = "Witaj świecie"
out_tokens = ["77", "41", "69", "74", "61", "6a", "20", "4b", "a5", "97", "63", "69", "65"]
self.assertEqual(tokenizer.tokenize(in_pl_str), out_tokens)
in_jp_str = "こんにちは世界"
out_tokens = ["58", "80", "91", "a1", "e4", "b8", "96", "e7", "95", "8c"]
self.assertEqual(tokenizer.tokenize(in_jp_str), out_tokens)
def test_batch_tokenize(self):
tokenizer = self.get_tokenizer()
in_batch = ["Hello World", "Witaj świecie", "こんにちは世界"]
out_tokens = [
["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"],
["77", "41", "69", "74", "61", "6a", "20", "4b", "a5", "97", "63", "69", "65", "</s>"],
["58", "80", "91", "a1", "e4", "b8", "96", "e7", "95", "8c", "</s>"],
]
self.assertListEqual(
[tokenizer.convert_ids_to_tokens(ids) for ids in tokenizer(in_batch)["input_ids"]], out_tokens
)
def test_special_bytes(self):
tokenizer = self.get_tokenizer()
in_str_special = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09"
out_tokens = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09"]
self.assertEqual(tokenizer.tokenize(in_str_special), out_tokens)
in_str_mixed = "\x00Hello\x01 World\x02"
out_tokens = ["00", "52", "85", "91", "9f", "6f", "01", "20", "52", "85", "9f", "90", "02"]
self.assertEqual(tokenizer.tokenize(in_str_mixed), out_tokens)
def test_special_tokens(self):
tokenizer = self.get_tokenizer()
in_str_special = "<unk></s><pad>"
out_tokens = ["<unk>", "</s>", "<pad>"]
self.assertEqual(tokenizer.tokenize(in_str_special), out_tokens)
in_str_not_special = "<s>"
out_tokens = ["3c", "73", "3e"]
self.assertEqual(tokenizer.tokenize(in_str_not_special), out_tokens)
in_str_mixed = "<s>Hello World</s>"
out_tokens = ["3c", "73", "3e", "52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"]
self.assertEqual(tokenizer.tokenize(in_str_mixed), out_tokens)
def test_token_ids_conversion(self):
tokenizer = self.get_tokenizer()
tokens_range = [f"{x:02x}" for x in range(256)]
indices_range = list(range(3, 256 + 3))
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens_range), indices_range)
self.assertListEqual(tokenizer.convert_ids_to_tokens(indices_range), tokens_range)
special_tokens = ["<pad>", "</s>", "<unk>"]
special_indices = [0, 1, 2]
self.assertListEqual(tokenizer.convert_tokens_to_ids(special_tokens), special_indices)
self.assertListEqual(tokenizer.convert_ids_to_tokens(special_indices), special_tokens)
| transformers/tests/models/myt5/test_tokenization_myt5.py/0 | {
"file_path": "transformers/tests/models/myt5/test_tokenization_myt5.py",
"repo_id": "transformers",
"token_count": 3048
} | 588 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import requests
from transformers import (
AutoProcessor,
Ovis2Config,
Ovis2ForConditionalGeneration,
Ovis2Model,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
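# Builds a tiny Ovis2 config (small Qwen2-style text backbone plus a small vision tower) and random
# inputs so the common model and generation tests stay fast.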
class Ovis2VisionText2TextModelTester:
def __init__(
self,
parent,
seq_length=7,
text_config={
"model_type": "qwen2",
"seq_length": 7,
"is_training": True,
"use_labels": True,
"vocab_size": 99,
"hidden_size": 64,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 4,
"intermediate_size": 54,
"hidden_act": "gelu",
"max_position_embeddings": 580,
"initializer_range": 0.02,
"num_labels": 3,
"pad_token_id": 0,
},
is_training=True,
vision_config={
"image_size": 32,
"patch_size": 8,
"num_channels": 3,
"hidden_size": 64,
"vocab_size": 99,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 54,
"attention_dropout": 0.0,
"hidden_act": "silu",
"qkv_bias": False,
"hidden_stride": 2,
"tokenize_function": "softmax",
},
image_token_id=1,
visual_indicator_token_ids=[],
vocab_size=99,
hidden_size=64,
ignore_id=-100,
):
self.parent = parent
self.text_config = text_config
self.vision_config = vision_config
self.image_token_id = image_token_id
self.visual_indicator_token_ids = visual_indicator_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.image_seq_length = (
vision_config["image_size"] // (vision_config["patch_size"] * vision_config["hidden_stride"])
) ** 2
self.seq_length = seq_length + self.image_seq_length
self.is_training = is_training
self.num_attention_heads = text_config["num_attention_heads"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.pad_token_id = text_config["pad_token_id"]
self.ignore_id = ignore_id
self.batch_size = 3
self.num_channels = 3
def get_config(self):
return Ovis2Config(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_id=self.image_token_id,
visual_indicator_token_ids=self.visual_indicator_token_ids,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
vocab_range = self.vocab_size - 2
input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_range) + 2
input_ids[:, : self.image_seq_length] = config.image_token_id
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
labels = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)
labels[:, : self.image_seq_length] = self.ignore_id
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"labels": labels,
}
return config, inputs_dict
@require_torch
class Ovis2ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `Ovis2ForConditionalGeneration`.
"""
all_model_classes = (
(
Ovis2Model,
Ovis2ForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"image-text-to-text": Ovis2ForConditionalGeneration} if is_torch_available() else {}
_is_composite = True
test_pruning = False
test_torchscript = False
test_head_masking = False
def setUp(self):
self.model_tester = Ovis2VisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Ovis2Config, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)
    # overwrite inputs_embeds tests because we need to delete "pixel_values" for LVLMs,
    # while some other models require pixel_values to be present
def test_inputs_embeds_matches_input_ids(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
inputs_embeds = model.get_input_embeddings()(input_ids)
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
torch.testing.assert_close(out_embeds, out_ids)
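# The integration tests below run the released thisisiron/Ovis2-2B-hf checkpoint end to end
# (single image, batched, multi-image, mixed resolutions) and compare against recorded generations.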
@require_torch
@slow
class Ovis2IntegrationTest(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained(
"thisisiron/Ovis2-2B-hf",
)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
self.image = Image.open(requests.get(url, stream=True).raw)
self.prompt_image = ""
self.messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What do you see in this image?"},
],
}
]
self.text = self.processor.apply_chat_template(self.messages, add_generation_prompt=True, tokenize=False)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_small_model_integration_test(self):
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
)
inputs = self.processor(images=self.image, text=self.text, return_tensors="pt").to(
torch_device, torch.bfloat16
)
self.assertTrue(inputs.input_ids.shape[1] == 1314) # should expand num-image-tokens times
self.assertTrue(inputs.pixel_values.shape == torch.Size([5, 3, 448, 448]))
inputs = inputs.to(torch_device)
output = model.generate(**inputs, max_new_tokens=64)
EXPECTED_DECODED_TEXT = 'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.' # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_batch(self):
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
)
inputs = self.processor(
text=[self.text],
images=self.image,
return_tensors="pt",
padding=True,
).to(torch_device, torch.bfloat16)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = ['system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.'] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_multi_image(self):
# related to (#29835)
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf",
dtype="bfloat16",
device_map=torch_device,
)
url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "image"},
{"type": "text", "text": "What do you see in these images?"},
],
}
]
text = self.processor.apply_chat_template(prompt, add_generation_prompt=True, tokenize=False)
inputs = self.processor(text=text, images=[self.image, image], return_tensors="pt").to(
torch_device, torch.bfloat16
)
output = model.generate(**inputs, max_new_tokens=40)
EXPECTED_DECODED_TEXT = 'system\nYou are a helpful assistant.\nuser\n\n\nWhat do you see in these images?\nassistant\nIn the first image, I see two cats lying on a pink blanket with remote controls nearby. The second image shows a dog standing on a wooden floor near a kitchen cabinet.' # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_batch_different_resolutions(self):
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
)
lowres_url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
lowres_img = Image.open(requests.get(lowres_url, stream=True).raw).resize((320, 240))
inputs = self.processor(
text=[self.text, self.text],
images=[lowres_img, self.image],
return_tensors="pt",
padding=True,
).to(torch_device, torch.bfloat16)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = [
'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nAnswer: I see a brown dog standing on a wooden floor in what appears to be a kitchen.',
'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.'
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_batch_matches_single(self):
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf",
dtype="bfloat16",
device_map=torch_device,
)
lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e"
lowres_img = Image.open(requests.get(lowres_url, stream=True).raw)
inputs_batched = self.processor(
text=[self.text, self.text],
images=[self.image, lowres_img],
return_tensors="pt",
padding=True,
).to(torch_device, torch.bfloat16)
inputs_single = self.processor(text=self.text, images=self.image, return_tensors="pt", padding=True).to(
torch_device, torch.bfloat16
)
output_batched = model.generate(**inputs_batched, max_new_tokens=50)
output_single = model.generate(**inputs_single, max_new_tokens=50)
self.assertEqual(
self.processor.decode(output_batched[0], skip_special_tokens=True),
self.processor.decode(output_single[0], skip_special_tokens=True),
)
| transformers/tests/models/ovis2/test_modeling_ovis2.py/0 | {
"file_path": "transformers/tests/models/ovis2/test_modeling_ovis2.py",
"repo_id": "transformers",
"token_count": 6467
} | 589 |
# Copyright 2023 IBM and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch PatchTSMixer model."""
import inspect
import itertools
import random
import tempfile
import unittest
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from parameterized import parameterized
from transformers import is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from transformers.utils import check_torch_load_is_safe
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING,
MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING,
PatchTSMixerConfig,
PatchTSMixerForPrediction,
PatchTSMixerForPretraining,
PatchTSMixerForRegression,
PatchTSMixerForTimeSeriesClassification,
PatchTSMixerModel,
)
from transformers.models.patchtsmixer.modeling_patchtsmixer import (
PatchTSMixerEncoder,
PatchTSMixerForPredictionHead,
PatchTSMixerForPredictionOutput,
PatchTSMixerForRegressionOutput,
PatchTSMixerForTimeSeriesClassificationOutput,
PatchTSMixerLinearHead,
PatchTSMixerPretrainHead,
)
@require_torch
class PatchTSMixerModelTester:
def __init__(
self,
context_length: int = 32,
patch_length: int = 8,
num_input_channels: int = 3,
patch_stride: int = 8,
# d_model: int = 128,
hidden_size: int = 8,
# num_layers: int = 8,
num_hidden_layers: int = 2,
expansion_factor: int = 2,
dropout: float = 0.5,
mode: str = "common_channel",
gated_attn: bool = True,
norm_mlp="LayerNorm",
swin_hier: int = 0,
# masking related
mask_type: str = "forecast",
random_mask_ratio=0.5,
mask_patches: list = [2, 3],
forecast_mask_ratios: list = [1, 1],
mask_value=0,
masked_loss: bool = False,
mask_mode: str = "mask_before_encoder",
channel_consistent_masking: bool = True,
scaling: Optional[Union[str, bool]] = "std",
# Head related
head_dropout: float = 0.2,
# forecast related
prediction_length: int = 16,
out_channels: Optional[int] = None,
# Classification/regression related
# num_labels: int = 3,
num_targets: int = 3,
output_range: Optional[list] = None,
head_aggregation: Optional[str] = None,
# Trainer related
batch_size=13,
is_training=True,
seed_number=42,
post_init=True,
num_parallel_samples=4,
):
self.num_input_channels = num_input_channels
self.context_length = context_length
self.patch_length = patch_length
self.patch_stride = patch_stride
# self.d_model = d_model
self.hidden_size = hidden_size
self.expansion_factor = expansion_factor
# self.num_layers = num_layers
self.num_hidden_layers = num_hidden_layers
self.dropout = dropout
self.mode = mode
self.gated_attn = gated_attn
self.norm_mlp = norm_mlp
self.swin_hier = swin_hier
self.scaling = scaling
self.head_dropout = head_dropout
# masking related
self.mask_type = mask_type
self.random_mask_ratio = random_mask_ratio
self.mask_patches = mask_patches
self.forecast_mask_ratios = forecast_mask_ratios
self.mask_value = mask_value
self.channel_consistent_masking = channel_consistent_masking
self.mask_mode = mask_mode
self.masked_loss = masked_loss
# patching related
self.patch_last = True
# forecast related
self.prediction_length = prediction_length
self.out_channels = out_channels
# classification/regression related
# self.num_labels = num_labels
self.num_targets = num_targets
self.output_range = output_range
self.head_aggregation = head_aggregation
# Trainer related
self.batch_size = batch_size
self.is_training = is_training
self.seed_number = seed_number
self.post_init = post_init
self.num_parallel_samples = num_parallel_samples
def get_config(self):
config_ = PatchTSMixerConfig(
num_input_channels=self.num_input_channels,
context_length=self.context_length,
patch_length=self.patch_length,
patch_stride=self.patch_stride,
# d_model = self.d_model,
d_model=self.hidden_size,
expansion_factor=self.expansion_factor,
# num_layers = self.num_layers,
num_layers=self.num_hidden_layers,
dropout=self.dropout,
mode=self.mode,
gated_attn=self.gated_attn,
norm_mlp=self.norm_mlp,
swin_hier=self.swin_hier,
scaling=self.scaling,
head_dropout=self.head_dropout,
mask_type=self.mask_type,
random_mask_ratio=self.random_mask_ratio,
mask_patches=self.mask_patches,
forecast_mask_ratios=self.forecast_mask_ratios,
mask_value=self.mask_value,
channel_consistent_masking=self.channel_consistent_masking,
mask_mode=self.mask_mode,
masked_loss=self.masked_loss,
prediction_length=self.prediction_length,
out_channels=self.out_channels,
# num_labels=self.num_labels,
num_targets=self.num_targets,
output_range=self.output_range,
head_aggregation=self.head_aggregation,
post_init=self.post_init,
)
self.num_patches = config_.num_patches
return config_
def prepare_patchtsmixer_inputs_dict(self, config):
_past_length = config.context_length
# bs, n_vars, num_patch, patch_length
# [bs x context_length x n_vars]
past_values = floats_tensor([self.batch_size, _past_length, self.num_input_channels])
inputs_dict = {
"past_values": past_values,
}
return inputs_dict
def prepare_config_and_inputs(self):
config = self.get_config()
inputs_dict = self.prepare_patchtsmixer_inputs_dict(config)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
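# Common-test harness for PatchTSMixer: inputs are raw time series (`past_values`) rather than
# token ids, and attention maps are not exposed (has_attentions = False).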
@require_torch
class PatchTSMixerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
PatchTSMixerModel,
PatchTSMixerForPrediction,
PatchTSMixerForPretraining,
PatchTSMixerForTimeSeriesClassification,
PatchTSMixerForRegression,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": PatchTSMixerModel} if is_torch_available() else {}
is_encoder_decoder = False
test_pruning = False
test_head_masking = False
test_missing_keys = False
test_torchscript = False
test_inputs_embeds = False
test_resize_embeddings = True
test_resize_position_embeddings = False
test_mismatched_shapes = True
test_model_parallel = False
has_attentions = False
def setUp(self):
self.model_tester = PatchTSMixerModelTester()
self.config_tester = ConfigTester(
self,
config_class=PatchTSMixerConfig,
has_text_modality=False,
prediction_length=self.model_tester.prediction_length,
common_properties=["hidden_size", "expansion_factor", "num_hidden_layers"],
)
def test_config(self):
self.config_tester.run_common_tests()
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if model_class == PatchTSMixerForPrediction:
rng = random.Random(self.model_tester.seed_number)
labels = floats_tensor(
[
self.model_tester.batch_size,
self.model_tester.prediction_length,
self.model_tester.num_input_channels,
],
rng=rng,
)
inputs_dict["future_values"] = labels
elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING):
rng = random.Random(self.model_tester.seed_number)
labels = ids_tensor([self.model_tester.batch_size], self.model_tester.num_targets, rng=rng)
inputs_dict["target_values"] = labels
elif model_class in get_values(MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING):
rng = random.Random(self.model_tester.seed_number)
labels = floats_tensor([self.model_tester.batch_size, self.model_tester.num_targets], rng=rng)
inputs_dict["target_values"] = labels
inputs_dict["output_hidden_states"] = True
return inputs_dict
def test_save_load_strict(self):
config, _ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester,
"expected_num_hidden_layers",
self.model_tester.num_hidden_layers,
)
self.assertEqual(len(hidden_states), expected_num_layers)
expected_hidden_size = self.model_tester.hidden_size
self.assertEqual(hidden_states[0].shape[-1], expected_hidden_size)
num_patch = self.model_tester.num_patches
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[num_patch, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="No tokens embeddings")
def test_resize_tokens_embeddings(self):
pass
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
output_ = model(**dict_inputs, return_dict=True, **additional_kwargs)
attributes_ = vars(output_)
dict_output = tuple(attributes_.values())
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (list, tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object),
set_nan_tensor_to_zero(dict_object),
atol=1e-5,
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
print(model_class)
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
tuple_inputs.update({"output_hidden_states": False})
dict_inputs.update({"output_hidden_states": False})
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
tuple_inputs.update({"output_hidden_states": False})
dict_inputs.update({"output_hidden_states": False})
check_equivalence(
model,
tuple_inputs,
dict_inputs,
)
def test_model_main_input_name(self):
model_signature = inspect.signature(getattr(PatchTSMixerModel, "forward"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(PatchTSMixerModel.main_input_name, observed_main_input_name)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model_class == PatchTSMixerForPretraining:
expected_arg_names = [
"past_values",
"observed_mask",
"output_hidden_states",
"return_loss",
]
elif model_class == PatchTSMixerModel:
expected_arg_names = [
"past_values",
"observed_mask",
"output_hidden_states",
]
elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING) or model_class in get_values(
MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING
):
expected_arg_names = [
"past_values",
"target_values",
"output_hidden_states",
"return_loss",
]
else:
# PatchTSMixerForPrediction
expected_arg_names = [
"past_values",
"observed_mask",
"future_values",
"output_hidden_states",
"return_loss",
]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@is_flaky()
def test_retain_grad_hidden_states_attentions(self):
super().test_retain_grad_hidden_states_attentions()
@unittest.skip(reason="Model does not have input embeddings")
def test_model_get_set_embeddings(self):
pass
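# Helper for the integration tests: downloads a recorded input batch from a Hub dataset repo and
# loads it onto the test device.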
def prepare_batch(repo_id="ibm/patchtsmixer-etth1-test-data", file="pretrain_batch.pt"):
# TODO: Make repo public
file = hf_hub_download(repo_id=repo_id, filename=file, repo_type="dataset")
check_torch_load_is_safe()
batch = torch.load(file, map_location=torch_device, weights_only=True)
return batch
@require_torch
@slow
class PatchTSMixerModelIntegrationTests(unittest.TestCase):
def test_pretrain_head(self):
model = PatchTSMixerForPretraining.from_pretrained("ibm/patchtsmixer-etth1-pretrain").to(torch_device)
batch = prepare_batch()
torch.manual_seed(0)
with torch.no_grad():
output = model(past_values=batch["past_values"].to(torch_device)).prediction_outputs
num_patch = (
max(model.config.context_length, model.config.patch_length) - model.config.patch_length
) // model.config.patch_stride + 1
expected_shape = torch.Size(
[
64,
model.config.num_input_channels,
num_patch,
model.config.patch_length,
]
)
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[[-0.9106]],[[1.5326]],[[-0.8245]],[[0.7439]],[[-0.7830]],[[2.6256]],[[-0.6485]],],device=torch_device) # fmt: skip
torch.testing.assert_close(output[0, :7, :1, :1], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_forecasting_head(self):
model = PatchTSMixerForPrediction.from_pretrained("ibm/patchtsmixer-etth1-forecasting").to(torch_device)
batch = prepare_batch(file="forecast_batch.pt")
model.eval()
torch.manual_seed(0)
with torch.no_grad():
output = model(
past_values=batch["past_values"].to(torch_device),
future_values=batch["future_values"].to(torch_device),
).prediction_outputs
expected_shape = torch.Size([64, model.config.prediction_length, model.config.num_input_channels])
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[0.2471, 0.5036, 0.3596, 0.5401, -0.0985, 0.3423, -0.8439]],
device=torch_device,
)
torch.testing.assert_close(output[0, :1, :7], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_prediction_generation(self):
model = PatchTSMixerForPrediction.from_pretrained("ibm/patchtsmixer-etth1-generate").to(torch_device)
batch = prepare_batch(file="forecast_batch.pt")
print(batch["past_values"])
torch.manual_seed(0)
model.eval()
with torch.no_grad():
outputs = model.generate(past_values=batch["past_values"].to(torch_device))
expected_shape = torch.Size((64, 1, model.config.prediction_length, model.config.num_input_channels))
self.assertEqual(outputs.sequences.shape, expected_shape)
expected_slice = torch.tensor(
[[0.4308, -0.4731, 1.3512, -0.1038, -0.4655, 1.1279, -0.7179]],
device=torch_device,
)
mean_prediction = outputs.sequences.mean(dim=1)
torch.testing.assert_close(mean_prediction[0, -1:], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
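# The functional tests below instantiate individual PatchTSMixer modules (encoder, heads, task
# models) with a shared small config and check output shapes and losses across parameter combinations.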
@require_torch
class PatchTSMixerFunctionalTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup method: Called once before test-cases execution"""
cls.params = {}
cls.params.update(
context_length=32,
patch_length=8,
num_input_channels=3,
patch_stride=8,
d_model=4,
expansion_factor=2,
num_layers=3,
dropout=0.2,
mode="common_channel", # common_channel, mix_channel
gated_attn=True,
norm_mlp="LayerNorm",
mask_type="random",
random_mask_ratio=0.5,
mask_patches=[2, 3],
forecast_mask_ratios=[1, 1],
mask_value=0,
masked_loss=True,
channel_consistent_masking=True,
head_dropout=0.2,
prediction_length=64,
out_channels=None,
# num_labels=3,
num_targets=3,
output_range=None,
head_aggregation=None,
scaling="std",
use_positional_encoding=False,
positional_encoding="sincos",
self_attn=False,
self_attn_heads=1,
num_parallel_samples=4,
)
cls.num_patches = (
max(cls.params["context_length"], cls.params["patch_length"]) - cls.params["patch_length"]
) // cls.params["patch_stride"] + 1
# batch_size = 32
batch_size = 2
int(cls.params["prediction_length"] / cls.params["patch_length"])
cls.data = torch.rand(
batch_size,
cls.params["context_length"],
cls.params["num_input_channels"],
)
cls.enc_data = torch.rand(
batch_size,
cls.params["num_input_channels"],
cls.num_patches,
cls.params["patch_length"],
)
cls.enc_output = torch.rand(
batch_size,
cls.params["num_input_channels"],
cls.num_patches,
cls.params["d_model"],
)
cls.flat_enc_output = torch.rand(
batch_size,
cls.num_patches,
cls.params["d_model"],
)
cls.correct_pred_output = torch.rand(
batch_size,
cls.params["prediction_length"],
cls.params["num_input_channels"],
)
cls.correct_regression_output = torch.rand(batch_size, cls.params["num_targets"])
cls.correct_pretrain_output = torch.rand(
batch_size,
cls.params["num_input_channels"],
cls.num_patches,
cls.params["patch_length"],
)
cls.correct_forecast_output = torch.rand(
batch_size,
cls.params["prediction_length"],
cls.params["num_input_channels"],
)
cls.correct_sel_forecast_output = torch.rand(batch_size, cls.params["prediction_length"], 2)
cls.correct_classification_output = torch.rand(
batch_size,
cls.params["num_targets"],
)
cls.correct_classification_classes = torch.randint(0, cls.params["num_targets"], (batch_size,))
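    # The tests below are shape-level checks on individual modules and task heads, reusing the
    # random tensors prepared in setUpClass.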
def test_patchtsmixer_encoder(self):
config = PatchTSMixerConfig(**self.__class__.params)
enc = PatchTSMixerEncoder(config)
output = enc(self.__class__.enc_data)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
def test_patchmodel(self):
config = PatchTSMixerConfig(**self.__class__.params)
mdl = PatchTSMixerModel(config)
output = mdl(self.__class__.data)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
self.assertEqual(output.patch_input.shape, self.__class__.enc_data.shape)
def test_pretrainhead(self):
config = PatchTSMixerConfig(**self.__class__.params)
head = PatchTSMixerPretrainHead(
config=config,
)
output = head(self.__class__.enc_output)
self.assertEqual(output.shape, self.__class__.correct_pretrain_output.shape)
def test_pretrain_full(self):
config = PatchTSMixerConfig(**self.__class__.params)
mdl = PatchTSMixerForPretraining(config)
output = mdl(self.__class__.data)
self.assertEqual(
output.prediction_outputs.shape,
self.__class__.correct_pretrain_output.shape,
)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
self.assertEqual(output.loss.item() < np.inf, True)
def test_pretrain_full_with_return_dict(self):
config = PatchTSMixerConfig(**self.__class__.params)
mdl = PatchTSMixerForPretraining(config)
output = mdl(self.__class__.data, return_dict=False)
self.assertEqual(output[1].shape, self.__class__.correct_pretrain_output.shape)
self.assertEqual(output[2].shape, self.__class__.enc_output.shape)
self.assertEqual(output[0].item() < np.inf, True)
def test_forecast_head(self):
config = PatchTSMixerConfig(**self.__class__.params)
head = PatchTSMixerForPredictionHead(
config=config,
)
# output = head(self.__class__.enc_output, raw_data = self.__class__.correct_pretrain_output)
output = head(self.__class__.enc_output)
self.assertEqual(output.shape, self.__class__.correct_forecast_output.shape)
def check_module(
self,
task,
params=None,
output_hidden_states=True,
):
config = PatchTSMixerConfig(**params)
if task == "forecast":
mdl = PatchTSMixerForPrediction(config)
target_input = self.__class__.correct_forecast_output
if config.prediction_channel_indices is not None:
target_output = self.__class__.correct_sel_forecast_output
else:
target_output = target_input
ref_samples = target_output.unsqueeze(1).expand(-1, config.num_parallel_samples, -1, -1)
ground_truth_arg = "future_values"
output_predictions_arg = "prediction_outputs"
elif task == "classification":
mdl = PatchTSMixerForTimeSeriesClassification(config)
target_input = self.__class__.correct_classification_classes
target_output = self.__class__.correct_classification_output
ground_truth_arg = "target_values"
output_predictions_arg = "prediction_outputs"
elif task == "regression":
mdl = PatchTSMixerForRegression(config)
target_input = self.__class__.correct_regression_output
target_output = self.__class__.correct_regression_output
ref_samples = target_output.unsqueeze(1).expand(-1, config.num_parallel_samples, -1)
ground_truth_arg = "target_values"
output_predictions_arg = "regression_outputs"
elif task == "pretrain":
mdl = PatchTSMixerForPretraining(config)
target_input = None
target_output = self.__class__.correct_pretrain_output
ground_truth_arg = None
output_predictions_arg = "prediction_outputs"
else:
print("invalid task")
enc_output = self.__class__.enc_output
if target_input is None:
output = mdl(self.__class__.data, output_hidden_states=output_hidden_states)
else:
output = mdl(
self.__class__.data,
**{
ground_truth_arg: target_input,
"output_hidden_states": output_hidden_states,
},
)
prediction_outputs = getattr(output, output_predictions_arg)
if isinstance(prediction_outputs, tuple):
for t in prediction_outputs:
self.assertEqual(t.shape, target_output.shape)
else:
self.assertEqual(prediction_outputs.shape, target_output.shape)
self.assertEqual(output.last_hidden_state.shape, enc_output.shape)
if output_hidden_states is True:
self.assertEqual(len(output.hidden_states), params["num_layers"])
else:
self.assertEqual(output.hidden_states, None)
self.assertEqual(output.loss.item() < np.inf, True)
if config.loss == "nll" and task in ["forecast", "regression"]:
samples = mdl.generate(self.__class__.data)
self.assertEqual(samples.sequences.shape, ref_samples.shape)
@parameterized.expand(
list(
itertools.product(
["common_channel", "mix_channel"],
[True, False],
[True, False, "mean", "std"],
[True, False],
[None, [0, 2]],
["mse", "nll"],
)
)
)
def test_forecast(self, mode, self_attn, scaling, gated_attn, prediction_channel_indices, loss):
params = self.__class__.params.copy()
params.update(
mode=mode,
self_attn=self_attn,
scaling=scaling,
prediction_channel_indices=prediction_channel_indices,
gated_attn=gated_attn,
loss=loss,
)
self.check_module(task="forecast", params=params)
@parameterized.expand(
list(
itertools.product(
["common_channel", "mix_channel"],
[True, False],
[True, False, "mean", "std"],
[True, False],
["max_pool", "avg_pool"],
)
)
)
def test_classification(self, mode, self_attn, scaling, gated_attn, head_aggregation):
params = self.__class__.params.copy()
params.update(
mode=mode,
self_attn=self_attn,
scaling=scaling,
head_aggregation=head_aggregation,
gated_attn=gated_attn,
)
self.check_module(task="classification", params=params)
@parameterized.expand(
list(
itertools.product(
["common_channel", "mix_channel"],
[True, False],
[True, False, "mean", "std"],
[True, False],
["max_pool", "avg_pool"],
["mse", "nll"],
)
)
)
def test_regression(self, mode, self_attn, scaling, gated_attn, head_aggregation, loss):
params = self.__class__.params.copy()
params.update(
mode=mode,
self_attn=self_attn,
scaling=scaling,
head_aggregation=head_aggregation,
gated_attn=gated_attn,
loss=loss,
)
self.check_module(task="regression", params=params)
@parameterized.expand(
list(
itertools.product(
["common_channel", "mix_channel"],
[True, False],
[True, False, "mean", "std"],
[True, False],
["random", "forecast"],
[True, False],
[True, False],
)
)
)
def test_pretrain(
self,
mode,
self_attn,
scaling,
gated_attn,
mask_type,
masked_loss,
channel_consistent_masking,
):
params = self.__class__.params.copy()
params.update(
mode=mode,
self_attn=self_attn,
scaling=scaling,
gated_attn=gated_attn,
mask_type=mask_type,
masked_loss=masked_loss,
channel_consistent_masking=channel_consistent_masking,
)
self.check_module(task="pretrain", params=params)
def forecast_full_module(self, params=None, output_hidden_states=False, return_dict=None):
config = PatchTSMixerConfig(**params)
mdl = PatchTSMixerForPrediction(config)
target_val = self.__class__.correct_forecast_output
if config.prediction_channel_indices is not None:
target_val = self.__class__.correct_sel_forecast_output
enc_output = self.__class__.enc_output
output = mdl(
self.__class__.data,
future_values=self.__class__.correct_forecast_output,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if isinstance(output, tuple):
output = PatchTSMixerForPredictionOutput(*output)
if config.loss == "mse":
self.assertEqual(output.prediction_outputs.shape, target_val.shape)
self.assertEqual(output.last_hidden_state.shape, enc_output.shape)
if output_hidden_states is True:
self.assertEqual(len(output.hidden_states), params["num_layers"])
else:
self.assertEqual(output.hidden_states, None)
self.assertEqual(output.loss.item() < np.inf, True)
if config.loss == "nll":
samples = mdl.generate(self.__class__.data)
ref_samples = target_val.unsqueeze(1).expand(-1, params["num_parallel_samples"], -1, -1)
self.assertEqual(samples.sequences.shape, ref_samples.shape)
def test_forecast_full(self):
self.check_module(task="forecast", params=self.__class__.params, output_hidden_states=True)
# self.forecast_full_module(self.__class__.params, output_hidden_states = True)
def test_forecast_full_2(self):
params = self.__class__.params.copy()
params.update(
mode="mix_channel",
)
self.forecast_full_module(params, output_hidden_states=True)
def test_forecast_full_2_with_return_dict(self):
params = self.__class__.params.copy()
params.update(
mode="mix_channel",
)
self.forecast_full_module(params, output_hidden_states=True, return_dict=False)
def test_forecast_full_3(self):
params = self.__class__.params.copy()
params.update(
mode="mix_channel",
)
self.forecast_full_module(params, output_hidden_states=True)
def test_forecast_full_5(self):
params = self.__class__.params.copy()
params.update(
self_attn=True,
use_positional_encoding=True,
positional_encoding="sincos",
)
self.forecast_full_module(params, output_hidden_states=True)
def test_forecast_full_4(self):
params = self.__class__.params.copy()
params.update(
mode="mix_channel",
prediction_channel_indices=[0, 2],
)
self.forecast_full_module(params)
def test_forecast_full_distributional(self):
params = self.__class__.params.copy()
params.update(
mode="mix_channel",
prediction_channel_indices=[0, 2],
loss="nll",
distribution_output="normal",
)
self.forecast_full_module(params)
def test_forecast_full_distributional_2(self):
params = self.__class__.params.copy()
params.update(
mode="mix_channel",
prediction_channel_indices=[0, 2],
loss="nll",
# distribution_output = "normal",
)
self.forecast_full_module(params)
def test_forecast_full_distributional_3(self):
params = self.__class__.params.copy()
params.update(
mode="mix_channel",
# prediction_channel_indices=[0, 2],
loss="nll",
distribution_output="normal",
)
self.forecast_full_module(params)
def test_forecast_full_distributional_4(self):
params = self.__class__.params.copy()
params.update(
mode="mix_channel",
# prediction_channel_indices=[0, 2],
loss="nll",
distribution_output="normal",
)
self.forecast_full_module(params)
def test_classification_head(self):
config = PatchTSMixerConfig(**self.__class__.params)
head = PatchTSMixerLinearHead(
config=config,
)
# output = head(self.__class__.enc_output, raw_data = self.__class__.correct_pretrain_output)
output = head(self.__class__.enc_output)
self.assertEqual(output.shape, self.__class__.correct_classification_output.shape)
def test_classification_full(self):
config = PatchTSMixerConfig(**self.__class__.params)
mdl = PatchTSMixerForTimeSeriesClassification(config)
output = mdl(
self.__class__.data,
target_values=self.__class__.correct_classification_classes,
)
self.assertEqual(
output.prediction_outputs.shape,
self.__class__.correct_classification_output.shape,
)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
self.assertEqual(output.loss.item() < np.inf, True)
def test_classification_full_with_return_dict(self):
config = PatchTSMixerConfig(**self.__class__.params)
mdl = PatchTSMixerForTimeSeriesClassification(config)
output = mdl(
self.__class__.data,
target_values=self.__class__.correct_classification_classes,
return_dict=False,
)
if isinstance(output, tuple):
output = PatchTSMixerForTimeSeriesClassificationOutput(*output)
self.assertEqual(
output.prediction_outputs.shape,
self.__class__.correct_classification_output.shape,
)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
self.assertEqual(output.loss.item() < np.inf, True)
def test_regression_head(self):
config = PatchTSMixerConfig(**self.__class__.params)
head = PatchTSMixerLinearHead(
config=config,
)
output = head(self.__class__.enc_output)
self.assertEqual(output.shape, self.__class__.correct_regression_output.shape)
def test_regression_full(self):
config = PatchTSMixerConfig(**self.__class__.params)
mdl = PatchTSMixerForRegression(config)
output = mdl(self.__class__.data, target_values=self.__class__.correct_regression_output)
self.assertEqual(
output.regression_outputs.shape,
self.__class__.correct_regression_output.shape,
)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
self.assertEqual(output.loss.item() < np.inf, True)
def test_regression_full_with_return_dict(self):
config = PatchTSMixerConfig(**self.__class__.params)
mdl = PatchTSMixerForRegression(config)
output = mdl(
self.__class__.data,
target_values=self.__class__.correct_regression_output,
return_dict=False,
)
if isinstance(output, tuple):
output = PatchTSMixerForRegressionOutput(*output)
self.assertEqual(
output.regression_outputs.shape,
self.__class__.correct_regression_output.shape,
)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
self.assertEqual(output.loss.item() < np.inf, True)
def test_regression_full_distribute(self):
params = self.__class__.params.copy()
params.update(loss="nll", distribution_output="normal")
config = PatchTSMixerConfig(**params)
mdl = PatchTSMixerForRegression(config)
output = mdl(self.__class__.data, target_values=self.__class__.correct_regression_output)
self.assertEqual(
output.regression_outputs[0].shape,
self.__class__.correct_regression_output.shape,
)
self.assertEqual(
output.regression_outputs[1].shape,
self.__class__.correct_regression_output.shape,
)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
self.assertEqual(output.loss.item() < np.inf, True)
if config.loss == "nll":
samples = mdl.generate(self.__class__.data)
ref_samples = self.__class__.correct_regression_output.unsqueeze(1).expand(
-1, params["num_parallel_samples"], -1
)
self.assertEqual(samples.sequences.shape, ref_samples.shape)
def test_regression_full_distribute_2(self):
params = self.__class__.params.copy()
params.update(loss="nll", distribution_output="student_t")
config = PatchTSMixerConfig(**params)
mdl = PatchTSMixerForRegression(config)
output = mdl(self.__class__.data, target_values=self.__class__.correct_regression_output)
self.assertEqual(
output.regression_outputs[0].shape,
self.__class__.correct_regression_output.shape,
)
self.assertEqual(
output.regression_outputs[1].shape,
self.__class__.correct_regression_output.shape,
)
self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape)
self.assertEqual(output.loss.item() < np.inf, True)
if config.loss == "nll":
samples = mdl.generate(self.__class__.data)
ref_samples = self.__class__.correct_regression_output.unsqueeze(1).expand(
-1, params["num_parallel_samples"], -1
)
self.assertEqual(samples.sequences.shape, ref_samples.shape)
| transformers/tests/models/patchtsmixer/test_modeling_patchtsmixer.py/0 | {
"file_path": "transformers/tests/models/patchtsmixer/test_modeling_patchtsmixer.py",
"repo_id": "transformers",
"token_count": 20483
} | 590 |
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
size=None,
do_normalize=True,
do_convert_rgb=True,
patch_size=None,
):
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.size = size
self.do_normalize = do_normalize
self.do_convert_rgb = do_convert_rgb
self.max_patches = [512, 1024, 2048, 4096]
self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
def prepare_image_processor_dict(self):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def prepare_dummy_image(self):
img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = Pix2StructImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def test_expected_patches(self):
dummy_image = self.image_processor_tester.prepare_dummy_image()
image_processor = self.image_processing_class(**self.image_processor_dict)
max_patch = 2048
inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
torch.testing.assert_close(inputs.flattened_patches.mean(), torch.tensor(0.0606), rtol=1e-3, atol=1e-3)
def test_call_pil(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
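# Illustrative note (not from the original test): each flattened Pix2Struct patch holds the
# raw patch pixels (patch_height * patch_width * num_channels values) plus 2 extra entries
# encoding the patch's row and column position, so with the default 16x16 RGB patches the
# expected hidden dimension is 16 * 16 * 3 + 2 = 770.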
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
def test_call_vqa(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
image_processor.is_vqa = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(ValueError):
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
dummy_text = "Hello"
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
def test_call_numpy(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
def test_call_numpy_4_channels(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
self.image_processor_tester.num_channels = 4
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch, input_data_format="channels_last"
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch, input_data_format="channels_last"
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
self.image_processor_tester.num_channels = 3
def test_call_pytorch(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
self.expected_encoded_image_num_channels = 3
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def test_call_pil(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
@unittest.skip(reason="Pix2StructImageProcessor does not support 4 channels yet") # FIXME Amy
def test_call_numpy(self):
return super().test_call_numpy()
@unittest.skip(reason="Pix2StructImageProcessor does not support 4 channels yet") # FIXME Amy
def test_call_pytorch(self):
return super().test_call_pytorch()
@unittest.skip(
reason="Pix2StructImageProcessor does treat numpy and PIL 4 channel images consistently"
) # FIXME Amy
def test_call_numpy_4_channels(self):
return super().test_call_numpy_4_channels()
| transformers/tests/models/pix2struct/test_image_processing_pix2struct.py/0 | {
"file_path": "transformers/tests/models/pix2struct/test_image_processing_pix2struct.py",
"repo_id": "transformers",
"token_count": 6144
} | 591 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from datasets import load_dataset
from transformers.testing_utils import (
require_essentia,
require_librosa,
require_pretty_midi,
require_scipy,
require_torch,
)
from transformers.tokenization_utils import BatchEncoding
from transformers.utils.import_utils import (
is_essentia_available,
is_librosa_available,
is_pretty_midi_available,
is_scipy_available,
is_torch_available,
)
requirements_available = (
is_torch_available()
and is_essentia_available()
and is_scipy_available()
and is_librosa_available()
and is_pretty_midi_available()
)
if requirements_available:
import pretty_midi
from transformers import (
Pop2PianoFeatureExtractor,
Pop2PianoForConditionalGeneration,
Pop2PianoProcessor,
Pop2PianoTokenizer,
)
@require_scipy
@require_torch
@require_librosa
@require_essentia
@require_pretty_midi
class Pop2PianoProcessorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano")
tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")
processor = Pop2PianoProcessor(feature_extractor, tokenizer)
processor.save_pretrained(cls.tmpdirname)
def get_tokenizer(self, **kwargs):
return Pop2PianoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_feature_extractor(self, **kwargs):
return Pop2PianoFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def test_save_load_pretrained_additional_features(self):
with tempfile.TemporaryDirectory() as tmpdir:
processor = Pop2PianoProcessor(
tokenizer=self.get_tokenizer(),
feature_extractor=self.get_feature_extractor(),
)
processor.save_pretrained(tmpdir)
tokenizer_add_kwargs = self.get_tokenizer(
unk_token="-1",
eos_token="1",
pad_token="0",
bos_token="2",
)
feature_extractor_add_kwargs = self.get_feature_extractor()
processor = Pop2PianoProcessor.from_pretrained(
tmpdir,
unk_token="-1",
eos_token="1",
pad_token="0",
bos_token="2",
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, Pop2PianoTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, Pop2PianoFeatureExtractor)
def get_inputs(self):
"""get inputs for both feature extractor and tokenizer"""
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech_samples = ds.sort("id").select([0])["audio"]
input_speech = [x["array"] for x in speech_samples][0]
sampling_rate = [x["sampling_rate"] for x in speech_samples][0]
feature_extractor_outputs = self.get_feature_extractor()(
audio=input_speech, sampling_rate=sampling_rate, return_tensors="pt"
)
model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
token_ids = model.generate(input_features=feature_extractor_outputs["input_features"], composer="composer1")
dummy_notes = [
[
pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77),
pretty_midi.Note(start=0.673379, end=0.905578, pitch=73, velocity=77),
pretty_midi.Note(start=0.905578, end=2.159456, pitch=73, velocity=77),
pretty_midi.Note(start=1.114558, end=2.159456, pitch=78, velocity=77),
pretty_midi.Note(start=1.323537, end=1.532517, pitch=80, velocity=77),
],
[
pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77),
],
]
return input_speech, sampling_rate, token_ids, dummy_notes
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Pop2PianoProcessor(
tokenizer=tokenizer,
feature_extractor=feature_extractor,
)
input_speech, sampling_rate, _, _ = self.get_inputs()
feature_extractor_outputs = feature_extractor(
audio=input_speech, sampling_rate=sampling_rate, return_tensors="np"
)
processor_outputs = processor(audio=input_speech, sampling_rate=sampling_rate, return_tensors="np")
for key in feature_extractor_outputs:
self.assertTrue(np.allclose(feature_extractor_outputs[key], processor_outputs[key], atol=1e-4))
def test_processor_batch_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Pop2PianoProcessor(
tokenizer=tokenizer,
feature_extractor=feature_extractor,
)
audio, sampling_rate, token_ids, _ = self.get_inputs()
feature_extractor_output = feature_extractor(audio=audio, sampling_rate=sampling_rate, return_tensors="pt")
encoded_processor = processor.batch_decode(
token_ids=token_ids,
feature_extractor_output=feature_extractor_output,
return_midi=True,
)
encoded_tokenizer = tokenizer.batch_decode(
token_ids=token_ids,
feature_extractor_output=feature_extractor_output,
return_midi=True,
)
# check start timings
encoded_processor_start_timings = [token.start for token in encoded_processor["notes"]]
encoded_tokenizer_start_timings = [token.start for token in encoded_tokenizer["notes"]]
self.assertListEqual(encoded_processor_start_timings, encoded_tokenizer_start_timings)
# check end timings
encoded_processor_end_timings = [token.end for token in encoded_processor["notes"]]
encoded_tokenizer_end_timings = [token.end for token in encoded_tokenizer["notes"]]
self.assertListEqual(encoded_processor_end_timings, encoded_tokenizer_end_timings)
# check pitch
encoded_processor_pitch = [token.pitch for token in encoded_processor["notes"]]
encoded_tokenizer_pitch = [token.pitch for token in encoded_tokenizer["notes"]]
self.assertListEqual(encoded_processor_pitch, encoded_tokenizer_pitch)
# check velocity
encoded_processor_velocity = [token.velocity for token in encoded_processor["notes"]]
encoded_tokenizer_velocity = [token.velocity for token in encoded_tokenizer["notes"]]
self.assertListEqual(encoded_processor_velocity, encoded_tokenizer_velocity)
def test_tokenizer_call(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Pop2PianoProcessor(
tokenizer=tokenizer,
feature_extractor=feature_extractor,
)
_, _, _, notes = self.get_inputs()
encoded_processor = processor(
notes=notes,
)
self.assertTrue(isinstance(encoded_processor, BatchEncoding))
def test_processor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Pop2PianoProcessor(
tokenizer=tokenizer,
feature_extractor=feature_extractor,
)
audio, sampling_rate, _, notes = self.get_inputs()
inputs = processor(
audio=audio,
sampling_rate=sampling_rate,
notes=notes,
)
self.assertListEqual(
list(inputs.keys()),
["input_features", "beatsteps", "extrapolated_beatstep", "token_ids"],
)
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
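# Minimal end-to-end usage sketch (illustrative only, not exercised by this test suite),
# assuming the "sweetcocoa/pop2piano" checkpoint and a mono audio array `audio` sampled at `sr`:
#
#   processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano")
#   model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
#   inputs = processor(audio=audio, sampling_rate=sr, return_tensors="pt")
#   token_ids = model.generate(input_features=inputs["input_features"], composer="composer1")
#   notes = processor.batch_decode(
#       token_ids=token_ids, feature_extractor_output=inputs, return_midi=True
#   )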
| transformers/tests/models/pop2piano/test_processing_pop2piano.py/0 | {
"file_path": "transformers/tests/models/pop2piano/test_processing_pop2piano.py",
"repo_id": "transformers",
"token_count": 3819
} | 592 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch RemBERT model."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
RemBertConfig,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertModel,
)
class RemBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
input_embedding_size=18,
output_embedding_size=43,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.input_embedding_size = input_embedding_size
self.output_embedding_size = output_embedding_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = RemBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
input_embedding_size=self.input_embedding_size,
output_embedding_size=self.output_embedding_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RemBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = RemBertModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RemBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = RemBertForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and next attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
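# Illustrative summary (not part of the original test): the slice comparison above checks
# that decoding the 3 appended tokens with cached past_key_values reproduces, within
# atol=1e-3, the hidden states obtained by re-running the full concatenated sequence
# without a cache.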
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RemBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = RemBertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = RemBertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = RemBertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class RemBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
RemBertModel,
RemBertForMaskedLM,
RemBertForCausalLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
)
if is_torch_available()
else ()
)
# Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante
all_generative_model_classes = ()
pipeline_model_mapping = (
{
"feature-extraction": RemBertModel,
"fill-mask": RemBertForMaskedLM,
"question-answering": RemBertForQuestionAnswering,
"text-classification": RemBertForSequenceClassification,
"text-generation": RemBertForCausalLM,
"token-classification": RemBertForTokenClassification,
"zero-shot": RemBertForSequenceClassification,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = RemBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=RemBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
@slow
def test_model_from_pretrained(self):
model_name = "google/rembert"
model = RemBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class RemBertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_model(self):
# Test exact values at the last hidden layer
model = RemBertModel.from_pretrained("google/rembert")
input_ids = torch.tensor([[312, 56498, 313, 2125, 313]])
segment_ids = torch.tensor([[0, 0, 0, 1, 1]])
with torch.no_grad():
output = model(input_ids, token_type_ids=segment_ids, output_hidden_states=True)
hidden_size = 1152
expected_shape = torch.Size((1, 5, hidden_size))
self.assertEqual(output["last_hidden_state"].shape, expected_shape)
expected_implementation = torch.tensor(
[
[
[0.0754, -0.2022, 0.1904],
[-0.3354, -0.3692, -0.4791],
[-0.2314, -0.6729, -0.0749],
[-0.0396, -0.3105, -0.4234],
[-0.1571, -0.0525, 0.5353],
]
]
)
# Running on the original tf implementation gives slightly different results here.
# Not clear why this variation is present
# TODO: Find reason for discrepancy
# expected_original_implementation = [[
# [0.07630594074726105, -0.20146065950393677, 0.19107051193714142],
# [-0.3405614495277405, -0.36971670389175415, -0.4808273911476135],
# [-0.22587086260318756, -0.6656315922737122, -0.07844287157058716],
# [-0.04145475849509239, -0.3077218234539032, -0.42316967248916626],
# [-0.15887849032878876, -0.054529931396245956, 0.5356100797653198]
# ]]
torch.testing.assert_close(
output["last_hidden_state"][:, :, :3], expected_implementation, rtol=1e-4, atol=1e-4
)
| transformers/tests/models/rembert/test_modeling_rembert.py/0 | {
"file_path": "transformers/tests/models/rembert/test_modeling_rembert.py",
"repo_id": "transformers",
"token_count": 8954
} | 593 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import requests
from transformers.testing_utils import (
is_flaky,
require_torch,
require_torch_accelerator,
require_torchvision,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from PIL import Image
from transformers import RTDetrImageProcessor, RTDetrImageProcessorFast
if is_torch_available():
import torch
class RTDetrImageProcessingTester:
def __init__(
self,
parent,
batch_size=4,
num_channels=3,
do_resize=True,
size=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=False,
do_pad=False,
return_tensors="pt",
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.do_resize = do_resize
self.size = size if size is not None else {"height": 640, "width": 640}
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.do_pad = do_pad
self.return_tensors = return_tensors
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"do_pad": self.do_pad,
"return_tensors": self.return_tensors,
}
def get_expected_values(self):
return self.size["height"], self.size["width"]
def expected_output_image_shape(self, images):
height, width = self.get_expected_values()
return self.num_channels, height, width
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=30,
max_resolution=400,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class RtDetrImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = RTDetrImageProcessor if is_vision_available() else None
fast_image_processing_class = RTDetrImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = RTDetrImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "resample"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "return_tensors"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 640, "width": 640})
def test_valid_coco_detection_annotations(self):
# prepare image and target
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
params = {"image_id": 39769, "annotations": target}
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class.from_pretrained("PekingU/rtdetr_r50vd")
# legal encodings (single image)
_ = image_processing(images=image, annotations=params, return_tensors="pt")
_ = image_processing(images=image, annotations=[params], return_tensors="pt")
# legal encodings (batch of one image)
_ = image_processing(images=[image], annotations=params, return_tensors="pt")
_ = image_processing(images=[image], annotations=[params], return_tensors="pt")
# legal encoding (batch of more than one image)
n = 5
_ = image_processing(images=[image] * n, annotations=[params] * n, return_tensors="pt")
# example of an illegal encoding (missing the 'image_id' key)
with self.assertRaises(ValueError) as e:
image_processing(images=image, annotations={"annotations": target}, return_tensors="pt")
self.assertTrue(str(e.exception).startswith("Invalid COCO detection annotations"))
# example of an illegal encoding (unequal lengths of images and annotations)
with self.assertRaises(ValueError) as e:
image_processing(images=[image] * n, annotations=[params] * (n - 1), return_tensors="pt")
self.assertTrue(str(e.exception) == "The number of images (5) and annotations (4) do not match.")
@slow
def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
target = {"image_id": 39769, "annotations": target}
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class.from_pretrained("PekingU/rtdetr_r50vd")
encoding = image_processing(images=image, annotations=target, return_tensors="pt")
# verify pixel values
expected_shape = torch.Size([1, 3, 640, 640])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.5490, 0.5647, 0.5725])
torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([2827.9883, 5403.4761, 235036.7344, 402070.2188, 71068.8281, 79601.2812])
torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([640, 640])
torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_image_processor_outputs(self):
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
encoding = image_processing(images=image, return_tensors="pt")
# verify pixel values: shape
expected_shape = torch.Size([1, 3, 640, 640])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# verify pixel values: output values
expected_slice = torch.tensor([0.5490196347236633, 0.5647059082984924, 0.572549045085907])
torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-5, atol=1e-5)
def test_multiple_images_processor_outputs(self):
images_urls = [
"http://images.cocodataset.org/val2017/000000000139.jpg",
"http://images.cocodataset.org/val2017/000000000285.jpg",
"http://images.cocodataset.org/val2017/000000000632.jpg",
"http://images.cocodataset.org/val2017/000000000724.jpg",
"http://images.cocodataset.org/val2017/000000000776.jpg",
"http://images.cocodataset.org/val2017/000000000785.jpg",
"http://images.cocodataset.org/val2017/000000000802.jpg",
"http://images.cocodataset.org/val2017/000000000872.jpg",
]
images = []
for url in images_urls:
image = Image.open(requests.get(url, stream=True).raw)
images.append(image)
for image_processing_class in self.image_processor_list:
# apply image processing
image_processing = image_processing_class(**self.image_processor_dict)
encoding = image_processing(images=images, return_tensors="pt")
# verify if pixel_values is part of the encoding
self.assertIn("pixel_values", encoding)
# verify pixel values: shape
expected_shape = torch.Size([8, 3, 640, 640])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# verify pixel values: output values
expected_slices = torch.tensor(
[
[0.5333333611488342, 0.5568627715110779, 0.5647059082984924],
[0.5372549295425415, 0.4705882668495178, 0.4274510145187378],
[0.3960784673690796, 0.35686275362968445, 0.3686274588108063],
[0.20784315466880798, 0.1882353127002716, 0.15294118225574493],
[0.364705890417099, 0.364705890417099, 0.3686274588108063],
[0.8078432083129883, 0.8078432083129883, 0.8078432083129883],
[0.4431372880935669, 0.4431372880935669, 0.4431372880935669],
[0.19607844948768616, 0.21176472306251526, 0.3607843220233917],
]
)
torch.testing.assert_close(encoding["pixel_values"][:, 1, 0, :3], expected_slices, rtol=1e-5, atol=1e-5)
@slow
def test_batched_coco_detection_annotations(self):
image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
annotations_0 = {"image_id": 39769, "annotations": target}
annotations_1 = {"image_id": 39769, "annotations": target}
# Adjust the bounding boxes for the resized image
w_0, h_0 = image_0.size
w_1, h_1 = image_1.size
for i in range(len(annotations_1["annotations"])):
coords = annotations_1["annotations"][i]["bbox"]
new_bbox = [
coords[0] * w_1 / w_0,
coords[1] * h_1 / h_0,
coords[2] * w_1 / w_0,
coords[3] * h_1 / h_0,
]
annotations_1["annotations"][i]["bbox"] = new_bbox
images = [image_0, image_1]
annotations = [annotations_0, annotations_1]
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class()
encoding = image_processing(
images=images,
annotations=annotations,
return_segmentation_masks=True,
return_tensors="pt", # do_convert_annotations=True
)
# Check the pixel values have been padded
postprocessed_height, postprocessed_width = 640, 640
expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# Check the bounding boxes have been adjusted for padded images
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
expected_boxes_0 = torch.tensor(
[
[0.6879, 0.4609, 0.0755, 0.3691],
[0.2118, 0.3359, 0.2601, 0.1566],
[0.5011, 0.5000, 0.9979, 1.0000],
[0.5010, 0.5020, 0.9979, 0.9959],
[0.3284, 0.5944, 0.5884, 0.8112],
[0.8394, 0.5445, 0.3213, 0.9110],
]
)
expected_boxes_1 = torch.tensor(
[
[0.5503, 0.2765, 0.0604, 0.2215],
[0.1695, 0.2016, 0.2080, 0.0940],
[0.5006, 0.4933, 0.9977, 0.9865],
[0.5008, 0.5002, 0.9983, 0.9955],
[0.2627, 0.5456, 0.4707, 0.8646],
[0.7715, 0.4115, 0.4570, 0.7161],
]
)
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check that with do_convert_annotations=False the annotations are not converted to the
# (centre_x, centre_y, width, height) format and are not normalized to the range [0, 1]
encoding = image_processing(
images=images,
annotations=annotations,
return_segmentation_masks=True,
do_convert_annotations=False,
return_tensors="pt",
)
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
# Convert to absolute coordinates
unnormalized_boxes_0 = torch.vstack(
[
expected_boxes_0[:, 0] * postprocessed_width,
expected_boxes_0[:, 1] * postprocessed_height,
expected_boxes_0[:, 2] * postprocessed_width,
expected_boxes_0[:, 3] * postprocessed_height,
]
).T
unnormalized_boxes_1 = torch.vstack(
[
expected_boxes_1[:, 0] * postprocessed_width,
expected_boxes_1[:, 1] * postprocessed_height,
expected_boxes_1[:, 2] * postprocessed_width,
expected_boxes_1[:, 3] * postprocessed_height,
]
).T
# Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
expected_boxes_0 = torch.vstack(
[
unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
]
).T
expected_boxes_1 = torch.vstack(
[
unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
@slow
@require_torch_accelerator
@require_torchvision
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations
def test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations(self):
# prepare image and target
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
target = {"image_id": 39769, "annotations": target}
processor = self.image_processor_list[1]()
# 1. run processor on CPU
encoding_cpu = processor(images=image, annotations=target, return_tensors="pt", device="cpu")
# 2. run processor on accelerator
encoding_gpu = processor(images=image, annotations=target, return_tensors="pt", device=torch_device)
# verify pixel values
self.assertEqual(encoding_cpu["pixel_values"].shape, encoding_gpu["pixel_values"].shape)
self.assertTrue(
torch.allclose(
encoding_cpu["pixel_values"][0, 0, 0, :3],
encoding_gpu["pixel_values"][0, 0, 0, :3].to("cpu"),
atol=1e-4,
)
)
# verify area
torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
# verify boxes
self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
self.assertTrue(
torch.allclose(
encoding_cpu["labels"][0]["boxes"][0], encoding_gpu["labels"][0]["boxes"][0].to("cpu"), atol=1e-3
)
)
# verify image_id
torch.testing.assert_close(
encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
)
# verify is_crowd
torch.testing.assert_close(
encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
)
# verify class_labels
self.assertTrue(
torch.allclose(
encoding_cpu["labels"][0]["class_labels"], encoding_gpu["labels"][0]["class_labels"].to("cpu")
)
)
# verify orig_size
torch.testing.assert_close(
encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
)
# verify size
torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
@is_flaky(
description="Still flaky with a failing ratio of ~0.6% after #36240",
)
def test_fast_is_faster_than_slow(self):
super().test_fast_is_faster_than_slow()
| transformers/tests/models/rt_detr/test_image_processing_rt_detr.py/0 | {
"file_path": "transformers/tests/models/rt_detr/test_image_processing_rt_detr.py",
"repo_id": "transformers",
"token_count": 9628
} | 594 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SAM2 model."""
import gc
import unittest
import requests
from transformers.testing_utils import (
backend_empty_cache,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from transformers.video_utils import load_video
if is_torch_available():
import torch
from transformers import Sam2VideoModel, Sam2VideoProcessor
if is_vision_available():
from PIL import Image
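# Helpers below fetch the image and video fixtures used by the integration tests.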
def prepare_image():
img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
def prepare_groceries_image():
img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/groceries.jpg"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
def prepare_dog_img():
img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
def prepare_video():
video_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4"
raw_video, _ = load_video(video_url)
return raw_video
@slow
class Sam2VideoModelIntegrationTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.video_model = Sam2VideoModel.from_pretrained("facebook/sam2.1-hiera-tiny").to(torch.float32)
self.processor = Sam2VideoProcessor.from_pretrained("facebook/sam2.1-hiera-tiny")
self.video_model.to(torch_device)
self.video_model.eval()
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
def test_inference_mask_generation_video_one_point(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
        ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integer)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
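        # post_process_masks resizes the 256x256 mask logits back to the original video resolution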
video_res_masks = self.processor.post_process_masks([low_res_masks], [raw_video.shape[-3:-1]], binarize=False)[
0
]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-21.4113, -21.4113, -22.9687], [-23.3090, -23.3090, -24.2606], [-27.5705, -27.5705, -27.1616]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-21.4113, -21.4113], [-23.3090, -23.3090]]]],
[[[[-20.1003, -20.1003], [-21.2294, -21.2294]]]],
[[[[-19.9619, -19.9619], [-21.3060, -21.3060]]]],
],
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_one_point_propagate_in_video_directly(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
        ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integer)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-21.4113, -21.4113], [-23.3090, -23.3090]]]],
[[[[-20.1003, -20.1003], [-21.2294, -21.2294]]]],
[[[[-19.9619, -19.9619], [-21.3060, -21.3060]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
        ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integer)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-11.1487, -11.1487, -11.4202], [-11.6522, -11.6522, -11.8057], [-12.7829, -12.7829, -12.6715]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-11.1487, -11.1487], [-11.6522, -11.6522]]]],
[[[[-15.3821, -15.3821], [-16.0333, -16.0333]]]],
[[[[-15.4855, -15.4855], [-16.4230, -16.4230]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_one_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
        ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integer)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_boxes=[[[300, 0, 500, 400]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-13.1427, -13.1427, -13.6418], [-13.7753, -13.7753, -14.1144], [-15.1957, -15.1957, -15.1757]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-13.1427, -13.1427], [-13.7753, -13.7753]]]],
[[[[-14.9998, -14.9998], [-15.7086, -15.7086]]]],
[[[[-15.4558, -15.4558], [-16.1649, -16.1649]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_one_point_one_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
        ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integer)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_boxes=[[[300, 0, 500, 400]]],
input_points=[[[[460, 60]]]],
input_labels=[[[1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-12.3525, -12.3525, -12.8907], [-13.0608, -13.0608, -13.4079], [-14.6511, -14.6511, -14.5694]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-12.3525, -12.3525], [-13.0608, -13.0608]]]],
[[[[-15.8181, -15.8181], [-16.4163, -16.4163]]]],
[[[[-15.8900, -15.8900], [-16.5953, -16.5953]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_multi_objects_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
        ann_obj_ids = [2, 3]  # give a unique id to each object we interact with (they can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_ids,
input_points=[[[[200, 300], [230, 250], [275, 175]], [[400, 150]]]],
input_labels=[[[1, 1, 0], [1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (2, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
            video_res_masks[:, 0, :2, :2],  # both objects
torch.tensor(
[[[-12.6294, -12.6294], [-13.3659, -13.3659]], [[-20.3319, -20.3319], [-22.0491, -22.0491]]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-12.6294, -12.6294], [-13.3659, -13.3659]]], [[[-20.3319, -20.3319], [-22.0491, -22.0491]]]],
[[[[-18.5249, -18.5249], [-19.5830, -19.5830]]], [[[-17.5537, -17.5537], [-19.2259, -19.2259]]]],
[[[[-14.2722, -14.2722], [-15.4622, -15.4622]]], [[[-18.3185, -18.3185], [-20.0314, -20.0314]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_propagate_video_from_mask_input(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
        ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integer)
# get input_mask
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
# set mask as input
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_masks=self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = sam2_video_output.pred_masks
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-10.0000, -10.0000], [-10.0000, -10.0000]]]],
[[[[-18.4807, -18.4807], [-19.1966, -19.1966]]]],
[[[[-20.0512, -20.0512], [-20.9110, -20.9110]]]],
],
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_propagate_on_streamed_video(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(inference_device=torch_device)
video_res_masks = []
max_frame_num_to_track = 3
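        # emulate streaming: preprocess and feed frames one at a time, adding the point prompts only on the first frame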
for frame_idx, frame in enumerate(raw_video):
if frame_idx >= max_frame_num_to_track:
break
inputs = self.processor(images=frame, device=torch_device, return_tensors="pt")
if frame_idx == 0:
self.processor.add_inputs_to_inference_session(
inference_session,
frame_idx=0,
obj_ids=1,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
original_size=inputs.original_sizes[0],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame=inputs.pixel_values[0])
video_res_masks.append(
self.processor.post_process_masks(
[sam2_video_output.pred_masks], inputs.original_sizes, binarize=False
)[0]
)
video_res_masks = torch.stack(video_res_masks, dim=0)
self.assertEqual(
video_res_masks.shape, (max_frame_num_to_track, 1, 1, raw_video.shape[-3], raw_video.shape[-2])
)
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
video_res_masks[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-11.1487, -11.1487], [-11.6522, -11.6522]]]],
[[[[-15.3821, -15.3821], [-16.0333, -16.0333]]]],
[[[[-15.4855, -15.4855], [-16.4230, -16.4230]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
| transformers/tests/models/sam2_video/test_modeling_sam2_video.py/0 | {
"file_path": "transformers/tests/models/sam2_video/test_modeling_sam2_video.py",
"repo_id": "transformers",
"token_count": 11258
} | 595 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SegFormer model."""
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import Expectations, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "hidden_sizes"))
self.parent.assertTrue(hasattr(config, "num_attention_heads"))
self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=64,
num_channels=3,
num_encoder_blocks=4,
depths=[1, 1, 1, 1],
sr_ratios=[8, 4, 2, 1],
hidden_sizes=[8, 8, 16, 16],
downsampling_rates=[1, 4, 8, 16],
num_attention_heads=[1, 1, 2, 2],
is_training=True,
use_labels=True,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.sr_ratios = sr_ratios
self.depths = depths
self.hidden_sizes = hidden_sizes
self.downsampling_rates = downsampling_rates
self.num_attention_heads = num_attention_heads
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return SegformerConfig(
image_size=self.image_size,
num_channels=self.num_channels,
num_encoder_blocks=self.num_encoder_blocks,
depths=self.depths,
hidden_sizes=self.hidden_sizes,
num_attention_heads=self.num_attention_heads,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels):
model = SegformerModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
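        # with this tester's defaults (image_size=64, downsampling_rates[-1]=16) the final feature map is 2x2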
expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
)
def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = SegformerForSemanticSegmentation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
)
result = model(pixel_values, labels=labels)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
)
self.parent.assertGreater(result.loss, 0.0)
def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
config.num_labels = 1
model = SegformerForSemanticSegmentation(config=config)
model.to(torch_device)
model.eval()
labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
result = model(pixel_values, labels=labels)
self.parent.assertGreater(result.loss, 0.0)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = SegformerModelTester(self)
self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_binary_image_segmentation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
def test_for_image_segmentation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
def test_batching_equivalence(self, atol=2e-4, rtol=2e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
@unittest.skip(reason="SegFormer does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
def test_model_get_set_embeddings(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
expected_num_attentions = sum(self.model_tester.depths)
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
# verify the first attentions (first block, first layer)
expected_seq_len = (self.model_tester.image_size // 4) ** 2
expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
)
# verify the last attentions (last block, last layer)
expected_seq_len = (self.model_tester.image_size // 32) ** 2
expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]),
[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), expected_num_attentions)
# verify the first attentions (first block, first layer)
expected_seq_len = (self.model_tester.image_size // 4) ** 2
expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = self.model_tester.num_encoder_blocks
self.assertEqual(len(hidden_states), expected_num_layers)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]),
[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_training(self):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
if model_class.__name__ in MODEL_MAPPING_NAMES.values():
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@slow
def test_model_from_pretrained(self):
model_name = "nvidia/segformer-b0-finetuned-ade-512-512"
model = SegformerModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_image_segmentation_ade(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
)
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
torch_device
)
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors="pt")
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
],
("cuda", 8): [
[[-4.6310, -5.5232, -6.2361], [-5.1918, -6.1445, -6.5996], [-5.4427, -6.2792, -6.7580]],
[[-12.1397, -13.3124, -13.9551], [-12.8736, -13.9347, -14.3569], [-12.9440, -13.8222, -14.2514]],
[[-12.5135, -13.4682, -14.4913], [-12.8670, -14.4339, -14.7766], [-13.2519, -14.5800, -15.0685]],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_inference_image_segmentation_city(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
)
model = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
).to(torch_device)
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors="pt")
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
],
("cuda", 8): [
[[-13.5728, -13.9089, -12.6492], [-14.3478, -15.3656, -14.2309], [-14.7512, -16.0394, -15.6065]],
[[-17.1642, -15.8704, -12.9641], [-17.2572, -17.3701, -14.8214], [-16.6043, -16.8761, -16.7425]],
[[-3.6444, -3.0189, -1.4195], [-3.0787, -3.1953, -1.9993], [-1.8755, -1.9219, -1.7002]],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3, :3], expected_slice, rtol=1e-1, atol=1e-1)
@slow
def test_post_processing_semantic_segmentation(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
)
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
torch_device
)
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors="pt")
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
expected_shape = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape, expected_shape)
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
expected_shape = torch.Size((128, 128))
self.assertEqual(segmentation[0].shape, expected_shape)
| transformers/tests/models/segformer/test_modeling_segformer.py/0 | {
"file_path": "transformers/tests/models/segformer/test_modeling_segformer.py",
"repo_id": "transformers",
"token_count": 8606
} | 596 |
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from PIL import Image
from transformers import Siglip2ImageProcessor
if is_torchvision_available():
from transformers import Siglip2ImageProcessorFast
class Siglip2ImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
resample=None,
patch_size=16,
max_num_patches=256,
):
size = size if size is not None else {"height": 18, "width": 18}
resample = resample if resample is not None else Image.Resampling.BILINEAR
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.resample = resample
self.patch_size = patch_size
self.max_num_patches = max_num_patches
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"resample": self.resample,
"patch_size": self.patch_size,
"max_num_patches": self.max_num_patches,
}
def expected_output_image_shape(self, images):
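        # Siglip2 packs each image into at most max_num_patches flattened patches of patch_size * patch_size * num_channels values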
return self.max_num_patches, self.patch_size * self.patch_size * self.num_channels
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
# Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest with CLIP->Siglip2
class Siglip2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Siglip2ImageProcessor if is_vision_available() else None
fast_image_processing_class = Siglip2ImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = Siglip2ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
# Ignore copy
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "resample"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "patch_size"))
self.assertTrue(hasattr(image_processing, "max_num_patches"))
# Ignore copy
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.max_num_patches, 256)
self.assertEqual(image_processor.patch_size, 16)
image_processor = self.image_processing_class.from_dict(
self.image_processor_dict, patch_size=32, max_num_patches=512
)
self.assertEqual(image_processor.patch_size, 32)
self.assertEqual(image_processor.max_num_patches, 512)
@unittest.skip(reason="not supported")
# Ignore copy
def test_call_numpy_4_channels(self):
pass
# Ignore copy
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = Image.open(
requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
    # increase mean tolerance from 1e-3 to 2e-3
# Ignore copy
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
| transformers/tests/models/siglip2/test_image_processing_siglip2.py/0 | {
"file_path": "transformers/tests/models/siglip2/test_image_processing_siglip2.py",
"repo_id": "transformers",
"token_count": 3194
} | 597 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from parameterized import parameterized
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import (
ImageProcessingTestMixin,
prepare_image_inputs,
)
if is_torch_available():
import numpy as np
import torch
from transformers.models.superglue.modeling_superglue import KeypointMatchingOutput
if is_vision_available():
from transformers import SuperGlueImageProcessor
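# Helpers returning random image-like numpy arrays / torch tensors for the shape-validation tests below.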
def random_array(size):
return np.random.randint(255, size=size)
def random_tensor(size):
return torch.rand(size)
class SuperGlueImageProcessingTester:
def __init__(
self,
parent,
batch_size=6,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_grayscale=True,
):
size = size if size is not None else {"height": 480, "width": 640}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_grayscale = do_grayscale
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_grayscale": self.do_grayscale,
}
def expected_output_image_shape(self, images):
return 2, self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False, pairs=True, batch_size=None):
batch_size = batch_size if batch_size is not None else self.batch_size
image_inputs = prepare_image_inputs(
batch_size=batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
if pairs:
image_inputs = [image_inputs[i : i + 2] for i in range(0, len(image_inputs), 2)]
return image_inputs
def prepare_keypoint_matching_output(self, pixel_values):
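        # Build a synthetic KeypointMatchingOutput (random keypoints, matches and scores) so the
        # post-processing logic can be exercised without running the actual SuperGlue model.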
max_number_keypoints = 50
batch_size = len(pixel_values)
mask = torch.zeros((batch_size, 2, max_number_keypoints), dtype=torch.int)
keypoints = torch.zeros((batch_size, 2, max_number_keypoints, 2))
matches = torch.full((batch_size, 2, max_number_keypoints), -1, dtype=torch.int)
scores = torch.zeros((batch_size, 2, max_number_keypoints))
for i in range(batch_size):
random_number_keypoints0 = np.random.randint(10, max_number_keypoints)
random_number_keypoints1 = np.random.randint(10, max_number_keypoints)
random_number_matches = np.random.randint(5, min(random_number_keypoints0, random_number_keypoints1))
mask[i, 0, :random_number_keypoints0] = 1
mask[i, 1, :random_number_keypoints1] = 1
keypoints[i, 0, :random_number_keypoints0] = torch.rand((random_number_keypoints0, 2))
keypoints[i, 1, :random_number_keypoints1] = torch.rand((random_number_keypoints1, 2))
random_matches_indices0 = torch.randperm(random_number_keypoints1, dtype=torch.int)[:random_number_matches]
random_matches_indices1 = torch.randperm(random_number_keypoints0, dtype=torch.int)[:random_number_matches]
matches[i, 0, random_matches_indices1] = random_matches_indices0
matches[i, 1, random_matches_indices0] = random_matches_indices1
scores[i, 0, random_matches_indices1] = torch.rand((random_number_matches,))
scores[i, 1, random_matches_indices0] = torch.rand((random_number_matches,))
return KeypointMatchingOutput(mask=mask, keypoints=keypoints, matches=matches, matching_scores=scores)
@require_torch
@require_vision
class SuperGlueImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = SuperGlueImageProcessor if is_vision_available() else None
def setUp(self) -> None:
super().setUp()
self.image_processor_tester = SuperGlueImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processing(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_grayscale"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 480, "width": 640})
image_processor = self.image_processing_class.from_dict(
self.image_processor_dict, size={"height": 42, "width": 42}
)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
@unittest.skip(reason="SuperPointImageProcessor is always supposed to return a grayscaled image")
def test_call_numpy_4_channels(self):
pass
def test_number_and_format_of_images_in_input(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
# Cases where the number of images and the format of lists in the input is correct
image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=2)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual((1, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape))
image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=2)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual((1, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape))
image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=4)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual((2, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape))
image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=6)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual((3, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape))
# Cases where the number of images or the format of lists in the input is incorrect
## List of 4 images
image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=4)
with self.assertRaises(ValueError) as cm:
image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(ValueError, cm.exception.__class__)
## List of 3 images
image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=3)
with self.assertRaises(ValueError) as cm:
image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(ValueError, cm.exception.__class__)
## List of 2 pairs and 1 image
image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=3)
with self.assertRaises(ValueError) as cm:
image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(ValueError, cm.exception.__class__)
@parameterized.expand(
[
([random_array((3, 100, 200)), random_array((3, 100, 200))], (1, 2, 3, 480, 640)),
([[random_array((3, 100, 200)), random_array((3, 100, 200))]], (1, 2, 3, 480, 640)),
([random_tensor((3, 100, 200)), random_tensor((3, 100, 200))], (1, 2, 3, 480, 640)),
            ([[random_tensor((3, 100, 200)), random_tensor((3, 100, 200))]], (1, 2, 3, 480, 640)),
],
)
def test_valid_image_shape_in_input(self, image_input, output):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(output, tuple(image_processed["pixel_values"].shape))
@parameterized.expand(
[
(random_array((3, 100, 200)),),
([random_array((3, 100, 200))],),
(random_array((1, 3, 100, 200)),),
([[random_array((3, 100, 200))]],),
([[random_array((3, 100, 200))], [random_array((3, 100, 200))]],),
([random_array((1, 3, 100, 200)), random_array((1, 3, 100, 200))],),
(random_array((1, 1, 3, 100, 200)),),
],
)
def test_invalid_image_shape_in_input(self, image_input):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
with self.assertRaises(ValueError) as cm:
image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(ValueError, cm.exception.__class__)
def test_input_images_properly_paired(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs()
pre_processed_images = image_processor.preprocess(image_inputs, return_tensors="np")
self.assertEqual(len(pre_processed_images["pixel_values"].shape), 5)
self.assertEqual(pre_processed_images["pixel_values"].shape[1], 2)
def test_input_not_paired_images_raises_error(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(pairs=False)
with self.assertRaises(ValueError):
image_processor.preprocess(image_inputs[0])
def test_input_image_properly_converted_to_grayscale(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs()
pre_processed_images = image_processor.preprocess(image_inputs)
for image_pair in pre_processed_images["pixel_values"]:
for image in image_pair:
self.assertTrue(np.all(image[0, ...] == image[1, ...]) and np.all(image[1, ...] == image[2, ...]))
def test_call_numpy(self):
# Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image_pair in image_pairs:
self.assertEqual(len(image_pair), 2)
expected_batch_size = int(self.image_processor_tester.batch_size / 2)
# Test with 2 images
encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test with list of pairs
encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs)
self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape))
# Test without paired images
image_pairs = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, numpify=True, pairs=False
)
with self.assertRaises(ValueError):
image_processing(image_pairs, return_tensors="pt").pixel_values
def test_call_pil(self):
# Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image_pair in image_pairs:
self.assertEqual(len(image_pair), 2)
expected_batch_size = int(self.image_processor_tester.batch_size / 2)
# Test with 2 images
encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test with list of pairs
encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs)
self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape))
# Test without paired images
image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, pairs=False)
with self.assertRaises(ValueError):
image_processing(image_pairs, return_tensors="pt").pixel_values
def test_call_pytorch(self):
# Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image_pair in image_pairs:
self.assertEqual(len(image_pair), 2)
expected_batch_size = int(self.image_processor_tester.batch_size / 2)
# Test with 2 images
encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test with list of pairs
encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs)
self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape))
# Test without paired images
image_pairs = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, torchify=True, pairs=False
)
with self.assertRaises(ValueError):
image_processing(image_pairs, return_tensors="pt").pixel_values
def test_image_processor_with_list_of_two_images(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
image_pairs = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, numpify=True, batch_size=2, pairs=False
)
self.assertEqual(len(image_pairs), 2)
self.assertTrue(isinstance(image_pairs[0], np.ndarray))
self.assertTrue(isinstance(image_pairs[1], np.ndarray))
expected_batch_size = 1
encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0])
self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape))
@require_torch
def test_post_processing_keypoint_matching(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs()
pre_processed_images = image_processor.preprocess(image_inputs, return_tensors="pt")
outputs = self.image_processor_tester.prepare_keypoint_matching_output(**pre_processed_images)
def check_post_processed_output(post_processed_output, image_pair_size):
for post_processed_output, (image_size0, image_size1) in zip(post_processed_output, image_pair_size):
self.assertTrue("keypoints0" in post_processed_output)
self.assertTrue("keypoints1" in post_processed_output)
self.assertTrue("matching_scores" in post_processed_output)
keypoints0 = post_processed_output["keypoints0"]
keypoints1 = post_processed_output["keypoints1"]
all_below_image_size0 = torch.all(keypoints0[:, 0] <= image_size0[1]) and torch.all(
keypoints0[:, 1] <= image_size0[0]
)
all_below_image_size1 = torch.all(keypoints1[:, 0] <= image_size1[1]) and torch.all(
keypoints1[:, 1] <= image_size1[0]
)
all_above_zero0 = torch.all(keypoints0[:, 0] >= 0) and torch.all(keypoints0[:, 1] >= 0)
                all_above_zero1 = torch.all(keypoints1[:, 0] >= 0) and torch.all(keypoints1[:, 1] >= 0)
self.assertTrue(all_below_image_size0)
self.assertTrue(all_below_image_size1)
self.assertTrue(all_above_zero0)
self.assertTrue(all_above_zero1)
all_scores_different_from_minus_one = torch.all(post_processed_output["matching_scores"] != -1)
self.assertTrue(all_scores_different_from_minus_one)
tuple_image_sizes = [
((image_pair[0].size[0], image_pair[0].size[1]), (image_pair[1].size[0], image_pair[1].size[1]))
for image_pair in image_inputs
]
tuple_post_processed_outputs = image_processor.post_process_keypoint_matching(outputs, tuple_image_sizes)
check_post_processed_output(tuple_post_processed_outputs, tuple_image_sizes)
tensor_image_sizes = torch.tensor(
[(image_pair[0].size, image_pair[1].size) for image_pair in image_inputs]
).flip(2)
tensor_post_processed_outputs = image_processor.post_process_keypoint_matching(outputs, tensor_image_sizes)
check_post_processed_output(tensor_post_processed_outputs, tensor_image_sizes)
| transformers/tests/models/superglue/test_image_processing_superglue.py/0 | {
"file_path": "transformers/tests/models/superglue/test_image_processing_superglue.py",
"repo_id": "transformers",
"token_count": 8009
} | 598 |
# coding=utf-8
# Copyright 2025 Google LLC and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import numpy as np
import torch
from transformers import TimesFmConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
if is_torch_available():
from transformers import TimesFmModelForPrediction
TOLERANCE = 1e-4
class TimesFmModelTester:
def __init__(
self,
parent,
patch_length: int = 32,
context_length: int = 512,
horizon_length: int = 128,
freq_size: int = 3,
num_hidden_layers: int = 1,
hidden_size: int = 16,
intermediate_size: int = 32,
head_dim: int = 8,
num_heads: int = 2,
tolerance: float = 1e-6,
rms_norm_eps: float = 1e-6,
quantiles: list[float] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
pad_val: float = 1123581321.0,
use_positional_embedding: bool = True,
initializer_factor: float = 0.0,
is_training: bool = False,
batch_size: int = 3,
):
self.parent = parent
self.patch_length = patch_length
self.context_length = context_length
self.horizon_length = horizon_length
self.quantiles = quantiles
self.pad_val = pad_val
self.freq_size = freq_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.head_dim = head_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_heads
self.tolerance = tolerance
self.rms_norm_eps = rms_norm_eps
self.use_positional_embedding = use_positional_embedding
self.initializer_factor = initializer_factor
self.is_training = is_training
self.batch_size = batch_size
# The size of test input
self.seq_length = context_length // patch_length
self.hidden_size = hidden_size
def get_config(self):
return TimesFmConfig(
patch_length=self.patch_length,
context_length=self.context_length,
horizon_length=self.horizon_length,
quantiles=self.quantiles,
pad_val=self.pad_val,
freq_size=self.freq_size,
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
head_dim=self.head_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
tolerance=self.tolerance,
rms_norm_eps=self.rms_norm_eps,
use_positional_embedding=self.use_positional_embedding,
initializer_factor=self.initializer_factor,
)
def get_pipeline_config(self):
return self.get_config()
def prepare_config_and_inputs(self):
forecast_input = [
torch.tensor(np.sin(np.linspace(0, 20, 100)), dtype=torch.float32, device=torch_device),
torch.tensor(np.cos(np.linspace(0, 20, 100)), dtype=torch.float32, device=torch_device),
torch.tensor(np.tan(np.linspace(0, 20, 100)), dtype=torch.float32, device=torch_device),
]
frequency_input = torch.tensor([0, 1, 2], dtype=torch.long, device=torch_device)
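# freq is TimesFM's per-series frequency category (here assumed to map roughly to high/medium/low frequency for 0/1/2)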
return (self.get_config(), torch.stack(forecast_input, dim=0), frequency_input)
def prepare_config_and_inputs_for_common(self):
(config, forecast_input, frequency_input) = self.prepare_config_and_inputs()
inputs_dict = {
"past_values": forecast_input,
"freq": frequency_input,
}
return config, inputs_dict
@require_torch
class TimesFmModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (TimesFmModelForPrediction,) if is_torch_available() else ()
all_generative_model_classes = ()
all_parallelizable_model_classes = ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_model_parallel = False
is_encoder_decoder = False
test_inputs_embeds = False
def setUp(self):
self.model_tester = TimesFmModelTester(self)
self.config_tester = ConfigTester(self, config_class=TimesFmConfig)
def test_create_and_run_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = TimesFmModelForPrediction(config)
model.to(torch_device)
model.eval()
results = model(**inputs_dict)
assert results.mean_predictions is not None
@unittest.skip(reason="Compile not yet supported because of masks")
def test_sdpa_can_dispatch_on_flash(self):
pass
@unittest.skip(reason="Model does not have input embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="Model does not have head mask")
def test_headmasking(self):
pass
# overwritten from common tests: check that `main_input_name` matches the first `forward` argument
def test_model_main_input_name(self):
model_signature = inspect.signature(getattr(TimesFmModelForPrediction, "forward"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(TimesFmModelForPrediction.main_input_name, observed_main_input_name)
@require_torch
@slow
class TimesFmModelIntegrationTests(unittest.TestCase):
def test_inference(self):
model = TimesFmModelForPrediction.from_pretrained("google/timesfm-2.0-500m-pytorch").to(torch_device)
forecast_input = [
np.sin(np.linspace(0, 20, 100)),
np.sin(np.linspace(0, 20, 200)),
np.sin(np.linspace(0, 20, 400)),
]
forecast_input_tensor = [torch.tensor(ts, dtype=torch.float32, device=torch_device) for ts in forecast_input]
frequency_input = [0, 1, 2]
with torch.no_grad():
output = model(past_values=forecast_input_tensor, freq=frequency_input)
mean_predictions = output.mean_predictions
self.assertEqual(mean_predictions.shape, torch.Size([3, model.config.horizon_length]))
# fmt: off
expected_slice = torch.tensor(
[ 0.9813, 1.0086, 0.9985, 0.9432, 0.8505, 0.7203, 0.5596, 0.3788,
0.1796, -0.0264, -0.2307, -0.4255, -0.5978, -0.7642, -0.8772, -0.9670,
-1.0110, -1.0162, -0.9848, -0.9151, -0.8016, -0.6511, -0.4707, -0.2842,
-0.0787, 0.1260, 0.3293, 0.5104, 0.6818, 0.8155, 0.9172, 0.9843,
1.0101, 1.0025, 0.9529, 0.8588, 0.7384, 0.5885, 0.4022, 0.2099,
-0.0035, -0.2104, -0.4146, -0.6033, -0.7661, -0.8818, -0.9725, -1.0191,
-1.0190, -0.9874, -0.9137, -0.8069, -0.6683, -0.4939, -0.3086, -0.1106,
0.0846, 0.2927, 0.4832, 0.6612, 0.8031, 0.9051, 0.9772, 1.0064
],
device=torch_device)
# fmt: on
self.assertTrue(torch.allclose(mean_predictions[0, :64], expected_slice, atol=TOLERANCE))
| transformers/tests/models/timesfm/test_modeling_timesfm.py/0 | {
"file_path": "transformers/tests/models/timesfm/test_modeling_timesfm.py",
"repo_id": "transformers",
"token_count": 3456
} | 599 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Wav2Vec2-BERT model."""
import tempfile
import unittest
from datasets import load_dataset
from transformers import Wav2Vec2BertConfig, is_torch_available
from transformers.testing_utils import (
is_flaky,
require_torch,
require_torch_accelerator,
require_torch_fp16,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
AutoFeatureExtractor,
Wav2Vec2BertForAudioFrameClassification,
Wav2Vec2BertForCTC,
Wav2Vec2BertForSequenceClassification,
Wav2Vec2BertForXVector,
Wav2Vec2BertModel,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _sample_negative_indices
from transformers.models.wav2vec2_bert.modeling_wav2vec2_bert import (
_compute_mask_indices,
)
# Copied from tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerModelTester with Conformer->Bert, input_values->input_features
class Wav2Vec2BertModelTester:
# Ignore copy
def __init__(
self,
parent,
batch_size=13,
seq_length=200, # speech is longer
is_training=False,
hidden_size=16,
feature_projection_input_dim=16,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
num_hidden_layers=2,
num_attention_heads=2,
hidden_dropout_prob=0.1,
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
mask_time_prob=0.5,
mask_time_length=2,
vocab_size=32,
do_stable_layer_norm=False,
num_adapter_layers=2,
adapter_stride=2,
tdnn_dim=(32, 32),
tdnn_kernel=(5, 3),
tdnn_dilation=(1, 2),
xvector_output_dim=32,
position_embeddings_type="relative",
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feature_projection_input_dim = feature_projection_input_dim
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.num_adapter_layers = num_adapter_layers
self.adapter_stride = adapter_stride
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.scope = scope
self.tdnn_dim = tdnn_dim
self.tdnn_kernel = tdnn_kernel
self.tdnn_dilation = tdnn_dilation
self.xvector_output_dim = xvector_output_dim
self.position_embeddings_type = position_embeddings_type
self.output_seq_length = self.seq_length
self.encoder_seq_length = self.output_seq_length
self.adapter_output_seq_length = self.output_seq_length
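# each adapter layer downsamples the sequence by its stride (ceil division), hence the length recurrence below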
for _ in range(num_adapter_layers):
self.adapter_output_seq_length = (self.adapter_output_seq_length - 1) // adapter_stride + 1
# Ignore copy
def prepare_config_and_inputs(self, position_embeddings_type="relative"):
input_shape = [self.batch_size, self.seq_length, self.feature_projection_input_dim]
input_features = floats_tensor(input_shape, self.vocab_size)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config(position_embeddings_type=position_embeddings_type)
return config, input_features, attention_mask
# Ignore copy
def get_config(self, position_embeddings_type="relative"):
return Wav2Vec2BertConfig(
hidden_size=self.hidden_size,
feature_projection_input_dim=self.feature_projection_input_dim,
mask_time_prob=self.mask_time_prob,
mask_time_length=self.mask_time_length,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout_prob=self.hidden_dropout_prob,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
do_stable_layer_norm=self.do_stable_layer_norm,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
num_adapter_layers=self.num_adapter_layers,
adapter_stride=self.adapter_stride,
tdnn_dim=self.tdnn_dim,
tdnn_kernel=self.tdnn_kernel,
tdnn_dilation=self.tdnn_dilation,
xvector_output_dim=self.xvector_output_dim,
position_embeddings_type=position_embeddings_type,
)
def create_and_check_model(self, config, input_features, attention_mask):
model = Wav2Vec2BertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_features, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def create_and_check_model_with_adapter(self, config, input_features, attention_mask):
config.add_adapter = True
model = Wav2Vec2BertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_features, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size)
)
def create_and_check_model_with_adapter_for_ctc(self, config, input_features, attention_mask):
config.add_adapter = True
config.output_hidden_size = 2 * config.hidden_size
model = Wav2Vec2BertForCTC(config=config)
model.to(torch_device)
model.eval()
result = model(input_features, attention_mask=attention_mask)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size)
)
# Ignore copy
def create_and_check_model_with_intermediate_ffn_before_adapter(self, config, input_features, attention_mask):
config.add_adapter = True
config.use_intermediate_ffn_before_adapter = True
model = Wav2Vec2BertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_features, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.adapter_output_seq_length, config.output_hidden_size),
)
# also try with different adapter proj dim
config.output_hidden_size = 8
model = Wav2Vec2BertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_features, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.adapter_output_seq_length, config.output_hidden_size),
)
def create_and_check_model_with_adapter_proj_dim(self, config, input_features, attention_mask):
config.add_adapter = True
config.output_hidden_size = 8
model = Wav2Vec2BertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_features, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.adapter_output_seq_length, config.output_hidden_size),
)
def create_and_check_model_float16(self, config, input_features, attention_mask):
model = Wav2Vec2BertModel(config=config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = Wav2Vec2BertModel.from_pretrained(tmpdirname, dtype=torch.float16)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_features.type(dtype=torch.float16), attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def check_ctc_loss(self, config, input_features, *args):
model = Wav2Vec2BertForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_features = input_features[:3]
# Ignore copy
attention_mask = torch.ones(input_features.shape[:2], device=torch_device, dtype=torch.long)
input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_features.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
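# keep label lengths strictly shorter than the shortest feature-extractor output length so the CTC loss stays finite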
# pad input
for i in range(len(input_lengths)):
input_features[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
def check_seq_classifier_loss(self, config, input_features, *args):
model = Wav2Vec2BertForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_features = input_features[:3]
# Ignore copy
attention_mask = torch.ones(input_features.shape[:2], device=torch_device, dtype=torch.long)
input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_features.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_features[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_features, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_ctc_training(self, config, input_features, *args):
config.ctc_zero_infinity = True
model = Wav2Vec2BertForCTC(config=config)
model.to(torch_device)
model.train()
# Ignore copy
input_features = input_features[:3]
input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_features.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_features[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
# it's important that we make sure that target lengths are at least
# one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_features, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_training(self, config, input_features, *args):
config.ctc_zero_infinity = True
model = Wav2Vec2BertForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_features = input_features[:3]
# Ignore copy
input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_features.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_features[i, input_lengths[i] :] = 0.0
loss = model(input_features, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_xvector_training(self, config, input_features, *args):
config.ctc_zero_infinity = True
model = Wav2Vec2BertForXVector(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_features = input_features[:3]
input_lengths = [input_features.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_features.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_features[i, input_lengths[i] :] = 0.0
loss = model(input_features, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_features, *args):
model = Wav2Vec2BertForCTC(config)
model.to(torch_device)
model.train()
input_features = input_features[:3]
input_lengths = [input_features.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_features.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with self.parent.assertRaises(ValueError):
model(input_features, labels=labels)
def prepare_config_and_inputs_for_common(self):
config, input_features, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_features": input_features, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class Wav2Vec2BertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
# Ignore copy
all_model_classes = (
(
Wav2Vec2BertForCTC,
Wav2Vec2BertModel,
Wav2Vec2BertForSequenceClassification,
Wav2Vec2BertForAudioFrameClassification,
Wav2Vec2BertForXVector,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"audio-classification": Wav2Vec2BertForSequenceClassification,
"automatic-speech-recognition": Wav2Vec2BertForCTC,
"feature-extraction": Wav2Vec2BertModel,
}
if is_torch_available()
else {}
)
test_pruning = False
test_headmasking = False
test_torchscript = False
def setUp(self):
self.model_tester = Wav2Vec2BertModelTester(self)
self.config_tester = ConfigTester(self, config_class=Wav2Vec2BertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@is_flaky(description="Gets a larger difference on A10 and is still flaky even with the new `5e-4`")
def test_batching_equivalence(self, atol=5e-4, rtol=5e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
def test_model_with_relative(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative")
self.model_tester.create_and_check_model(*config_and_inputs)
# Ignore copy
def test_model_with_relative_key(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative_key")
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_with_rotary(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary")
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_with_no_rel_pos(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type=None)
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_with_adapter(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_adapter(*config_and_inputs)
def test_model_with_adapter_for_ctc(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs)
# Ignore copy
def test_model_with_intermediate_ffn_before_adapter(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_intermediate_ffn_before_adapter(*config_and_inputs)
def test_model_with_adapter_proj_dim(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs)
@require_torch_accelerator
@require_torch_fp16
def test_model_float16_with_relative(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative")
self.model_tester.create_and_check_model_float16(*config_and_inputs)
# Ignore copy
@require_torch_accelerator
@require_torch_fp16
def test_model_float16_with_relative_key(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative_key")
self.model_tester.create_and_check_model_float16(*config_and_inputs)
@require_torch_accelerator
@require_torch_fp16
def test_model_float16_with_rotary(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary")
self.model_tester.create_and_check_model_float16(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_seq_classifier_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_seq_classifier_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_training(*config_and_inputs)
def test_xvector_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_xvector_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# Ignore copy
@unittest.skip(reason="Wav2Vec2Bert has no inputs_embeds")
def test_inputs_embeds(self):
pass
# Ignore copy
@unittest.skip(reason="`input_ids` is renamed to `input_features`")
def test_forward_signature(self):
pass
# Ignore copy
@unittest.skip(reason="Wav2Vec2Bert has no tokens embeddings")
def test_resize_tokens_embeddings(self):
pass
# Ignore copy
@unittest.skip(reason="Wav2Vec2Bert has no inputs_embeds")
def test_model_get_set_embeddings(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_features = inputs_dict["input_features"]
input_lengths = torch.tensor(
[input_features.shape[1] for _ in range(input_features.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_features.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
uniform_init_parms = [
"conv.weight",
"conv.parametrizations.weight",
"masked_spec_embed",
"codevectors",
"quantizer.weight_proj.weight",
"project_hid.weight",
"project_hid.bias",
"project_q.weight",
"project_q.bias",
"pos_bias_v",
"pos_bias_u",
"pointwise_conv1",
"pointwise_conv2",
"feature_projection.projection.weight",
"feature_projection.projection.bias",
"objective.weight",
]
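# the parameters listed above are initialized from a uniform distribution, so only a loose bound on their mean (in [-1, 1]) is checked below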
if param.requires_grad:
if any(x in name for x in uniform_init_parms):
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.data.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.fill_(3)
if hasattr(module, "pos_bias_u") and module.pos_bias_u is not None:
module.pos_bias_u.data.fill_(3)
if hasattr(module, "pos_bias_v") and module.pos_bias_v is not None:
module.pos_bias_v.data.fill_(3)
if hasattr(module, "codevectors") and module.codevectors is not None:
module.codevectors.data.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
# Ignore copy
@unittest.skip(reason="Kept to make #Copied from working")
def test_mask_feature_prob_ctc(self):
pass
# Ignore copy
@unittest.skip(reason="Kept to make #Copied from working")
def test_mask_time_prob_ctc(self):
pass
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
# Ignore copy
model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0")
self.assertIsNotNone(model)
@require_torch
# Copied from tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerUtilsTest with Conformer->Bert, input_values->input_features
class Wav2Vec2BertUtilsTest(unittest.TestCase):
def test_compute_mask_indices(self):
batch_size = 4
sequence_length = 60
mask_prob = 0.5
mask_length = 1
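# with mask_length == 1 spans cannot overlap, so exactly `mask_prob * sequence_length` indices are masked per example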
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
mask = torch.from_numpy(mask).to(torch_device)
self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])
def test_compute_mask_indices_low_prob(self):
# with these settings num_masked_spans=0.5, which means probabilistic rounding
# ensures that in 5 out of 10 method calls, num_masked_spans=0, and in
# the other 5 out of 10 calls, num_masked_spans=1
n_trials = 100
batch_size = 4
sequence_length = 100
mask_prob = 0.05
mask_length = 10
count_dimensions_masked = 0
count_dimensions_not_masked = 0
for _ in range(n_trials):
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
mask = torch.from_numpy(mask).to(torch_device)
num_masks = torch.sum(mask).item()
if num_masks > 0:
count_dimensions_masked += 1
else:
count_dimensions_not_masked += 1
# as we test for at least 10 masked dimensions and at least
# 10 non-masked dimensions, this test could fail with probability:
# P(100 coin flips, at most 9 heads) = 1.66e-18
self.assertGreater(count_dimensions_masked, int(n_trials * 0.1))
self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1))
def test_compute_mask_indices_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
mask = torch.from_numpy(mask).to(torch_device)
# because of overlap, the masks don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
def test_compute_mask_indices_attn_mask_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
attention_mask[:2, sequence_length // 2 :] = 0
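# zero out the second half of the first two examples so that no mask span can be placed there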
mask = _compute_mask_indices(
(batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
)
mask = torch.from_numpy(mask).to(torch_device)
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)
def test_compute_mask_indices_short_audio(self):
batch_size = 4
sequence_length = 100
mask_prob = 0.05
mask_length = 10
attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
# force one example to be heavily padded
attention_mask[0, 5:] = 0
mask = _compute_mask_indices(
(batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2
)
# the valid (non-padded) region of the heavily padded example is shorter than `mask_length`, so it must remain unmasked
self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any())
# Ignore copy
@unittest.skip(reason="Kept to make #Copied from working. Test a class used for pretraining, not yet supported.")
def test_compute_perplexity(self):
pass
def test_sample_negatives(self):
batch_size = 2
sequence_length = 10
hidden_size = 4
num_negatives = 3
features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view(
sequence_length, hidden_size
)  # every entry within a given feature vector holds the same value
features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous()
# sample negative indices
sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None)
sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
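# gather the sampled feature vectors and reshape to (num_negatives, batch_size, sequence_length, hidden_size)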
negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)]
negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3)
self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))
# make sure no negatively sampled vector is actually a positive one
for negative in negatives:
self.assertTrue(((negative - features) == 0).sum() == 0.0)
# make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim
self.assertEqual(negatives.unique(dim=-1).shape, torch.Size((num_negatives, batch_size, sequence_length, 1)))
def test_sample_negatives_with_mask(self):
batch_size = 2
sequence_length = 10
hidden_size = 4
num_negatives = 3
# second half of last input tensor is padded
mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
mask[-1, sequence_length // 2 :] = 0
features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view(
sequence_length, hidden_size
)  # every entry within a given feature vector holds the same value
features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous()
# replace masked feature vectors with -100 to test that those are not sampled
features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100)
# sample negative indices
sampled_negative_indices = _sample_negative_indices(
(batch_size, sequence_length), num_negatives, mask.cpu().numpy()
)
sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)]
negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3)
self.assertTrue((negatives >= 0).all().item())
self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))
# make sure no negatively sampled vector is actually a positive one
for negative in negatives:
self.assertTrue(((negative - features) == 0).sum() == 0.0)
# make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim
self.assertEqual(negatives.unique(dim=-1).shape, torch.Size((num_negatives, batch_size, sequence_length, 1)))
@require_torch
@slow
class Wav2Vec2BertModelIntegrationTest(unittest.TestCase):
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").filter(lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)])
speech_samples = speech_samples[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_inference_w2v2_bert(self):
model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0")
model.to(torch_device)
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0")
input_speech = self._load_datasamples(2)
inputs = feature_extractor(input_speech, return_tensors="pt", padding=True).to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**inputs, output_attentions=True)
# fmt: off
expected_slice_0 = torch.tensor(
[[-0.0098, -0.0570, -0.1286, 0.0439, -0.1037, -0.0235],
[-0.0767, 0.0574, -0.3224, 0.0482, 0.0440, -0.0193],
[ 0.0220, -0.0878, -0.2027, -0.0028, -0.0666, 0.0721],
[ 0.0307, -0.1099, 0.0273, -0.0416, -0.0715, 0.0094],
[ 0.0758, -0.0291, 0.1084, 0.0004, -0.0751, -0.0116],
[ 0.0349, -0.0343, -0.0098, 0.0415, -0.0617, 0.0241],
[-0.0193, -0.0171, 0.1965, 0.0797, -0.0308, 0.2033],
[-0.0323, -0.0315, 0.0948, 0.0944, -0.0254, 0.1241],
[-0.0493, 0.0010, -0.1762, 0.0034, -0.0787, 0.0832],
[ 0.0043, -0.1228, -0.0739, 0.0266, -0.0337, -0.0068]]
).to(torch_device)
# fmt: on
# fmt: off
expected_slice_1 = torch.tensor(
[[-0.0348, -0.0521, -0.3036, 0.0285, -0.0715, -0.0453],
[-0.0102, 0.0114, -0.3266, 0.0027, -0.0558, 0.0038],
[ 0.0454, 0.0148, -0.2418, -0.0392, -0.0455, 0.0478],
[-0.0013, 0.0825, -0.1730, -0.0091, -0.0426, 0.0360],
[-0.0227, 0.0687, -0.1168, 0.0569, -0.0160, 0.0759],
[-0.0318, 0.0562, -0.0508, 0.0605, 0.0150, 0.0953],
[-0.0415, 0.0438, 0.0233, 0.0336, 0.0262, 0.0860],
[-0.0163, 0.0048, 0.0807, 0.0119, 0.0712, 0.0158],
[ 0.0244, -0.0145, 0.0262, -0.0237, 0.0283, -0.0125],
[-0.0587, -0.0516, -0.0368, -0.0196, 0.0307, -0.1434]]
).to(torch_device)
# fmt: on
self.assertTrue((outputs.last_hidden_state[0, 25:35, 4:10] - expected_slice_0).abs().max() <= 1e-4)
self.assertTrue((outputs.last_hidden_state[1, 25:35, 4:10] - expected_slice_1).abs().max() <= 1e-4)
self.assertAlmostEqual(outputs.last_hidden_state[1].mean().item(), 3.3123e-05)
self.assertAlmostEqual(outputs.last_hidden_state[1].std().item(), 0.1545, delta=2e-5)
self.assertListEqual(list(outputs.last_hidden_state.shape), [2, 326, 1024])
| transformers/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py/0 | {
"file_path": "transformers/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py",
"repo_id": "transformers",
"token_count": 16401
} | 600 |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch XCLIP model."""
import inspect
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig
from transformers.testing_utils import (
Expectations,
require_torch,
require_torch_multi_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import XCLIPModel, XCLIPTextModel, XCLIPVisionModel
if is_vision_available():
from transformers import XCLIPProcessor
class XCLIPVisionModelTester:
def __init__(
self,
parent,
batch_size=8,
image_size=30,
patch_size=2,
num_channels=3,
num_frames=8, # important; the batch size * time must be divisible by the number of frames
is_training=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
mit_hidden_size=64,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_frames = num_frames
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.mit_hidden_size = mit_hidden_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[self.batch_size * self.num_frames, self.num_channels, self.image_size, self.image_size]
)
config = self.get_config()
return config, pixel_values
def get_config(self):
return XCLIPVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
num_frames=self.num_frames,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
mit_hidden_size=self.mit_hidden_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = XCLIPVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size * self.num_frames, num_patches + 1, self.hidden_size)
)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_frames, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class XCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as X-CLIP does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (XCLIPVisionModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = XCLIPVisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=XCLIPVisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="X-CLIP does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/xclip-base-patch32"
model = XCLIPVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_gradient_checkpointing_backward_compatibility(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if not model_class.supports_gradient_checkpointing:
continue
print("Model class:", model_class)
config.gradient_checkpointing = True
model = model_class(config)
self.assertTrue(model.is_gradient_checkpointing)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# we add 1 here due to the special message token in X-CLIP's vision encoder
seq_len = getattr(self.model_tester, "seq_length", None) + 1
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(outputs.attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length],
)
@require_torch_multi_gpu
def test_multi_gpu_data_parallel_forward(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# some params shouldn't be scattered by nn.DataParallel
# so just remove them if they are present.
blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]
for k in blacklist_non_batched_params:
inputs_dict.pop(k, None)
# move input tensors to cuda:0
for k, v in inputs_dict.items():
if torch.is_tensor(v):
inputs_dict[k] = v.to(0)
for model_class in self.all_model_classes:
model = model_class(config=config)
model.to(0)
model.eval()
# Wrap model in nn.DataParallel
model = nn.DataParallel(model)
with torch.no_grad():
test = self._prepare_for_class(inputs_dict, model_class)
for k, v in test.items():
if isinstance(v, torch.Tensor):
print(k, v.shape)
else:
print(k, v)
_ = model(**self._prepare_for_class(inputs_dict, model_class))
class XCLIPTextModelTester:
def __init__(
self,
parent,
batch_size=8,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return XCLIPTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = XCLIPTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class XCLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (XCLIPTextModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = XCLIPTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=XCLIPTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="X-CLIP does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/xclip-base-patch32"
model = XCLIPTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class XCLIPModelTester:
def __init__(
self,
parent,
text_kwargs=None,
vision_kwargs=None,
projection_dim=64,
mit_hidden_size=64,
is_training=True,
):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.projection_dim = projection_dim
self.mit_hidden_size = mit_hidden_size
self.text_model_tester = XCLIPTextModelTester(parent, **text_kwargs)
self.vision_model_tester = XCLIPVisionModelTester(parent, **vision_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.is_training = is_training
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, _ = self.vision_model_tester.prepare_config_and_inputs()
pixel_values = floats_tensor(
[
self.vision_model_tester.batch_size,
self.vision_model_tester.num_frames,
self.vision_model_tester.num_channels,
self.vision_model_tester.image_size,
self.vision_model_tester.image_size,
]
)
config = self.get_config()
return config, input_ids, attention_mask, pixel_values
def get_config(self):
return XCLIPConfig(
text_config=self.text_model_tester.get_config().to_dict(),
vision_config=self.vision_model_tester.get_config().to_dict(),
projection_dim=self.projection_dim,
)
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
model = XCLIPModel(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids, pixel_values, attention_mask)
self.parent.assertEqual(
result.logits_per_video.shape,
(self.vision_model_tester.batch_size, self.text_model_tester.batch_size),
)
self.parent.assertEqual(
result.logits_per_text.shape,
(self.text_model_tester.batch_size, self.vision_model_tester.batch_size),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
"return_loss": True,
}
return config, inputs_dict
@require_torch
class XCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (XCLIPModel,) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": XCLIPModel} if is_torch_available() else {}
fx_compatible = False
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
test_torchscript = False
maxdiff = None
def setUp(self):
self.model_tester = XCLIPModelTester(self)
common_properties = ["projection_dim", "prompt_layers", "prompt_num_attention_heads"]
self.config_tester = ConfigTester(
self, config_class=XCLIPConfig, has_text_modality=False, common_properties=common_properties
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="XCLIPModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="XCLIPModel does not support feedforward chunking")
def test_feed_forward_chunking(self):
pass
# override as the `logit_scale`, `prompts_generator.alpha` parameters require special treatment
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
np.log(1 / 0.07),
delta=1e-3,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
elif name == "prompts_generator.alpha":
self.assertAlmostEqual(param.data.mean().item(), model.config.prompt_alpha)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
self.skipTest(reason="test_torchscript is set to False")
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
try:
input_ids = inputs_dict["input_ids"]
pixel_values = inputs_dict["pixel_values"] # X-CLIP needs pixel_values
traced_model = torch.jit.trace(model, (input_ids, pixel_values))
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
non_persistent_buffers = {}
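# buffers present in the traced state dict but missing from the original one are non-persistent buffers;
# collect them and compare them against the model's buffers instead of its state dict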
for key in loaded_model_state_dict:
if key not in model_state_dict:
non_persistent_buffers[key] = loaded_model_state_dict[key]
loaded_model_state_dict = {
key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
}
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
model_buffers = list(model.buffers())
for non_persistent_buffer in non_persistent_buffers.values():
found_buffer = False
for i, model_buffer in enumerate(model_buffers):
if torch.equal(non_persistent_buffer, model_buffer):
found_buffer = True
break
self.assertTrue(found_buffer)
model_buffers.pop(i)
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save XCLIPConfig and check if we can load XCLIPVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = XCLIPVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save XCLIPConfig and check if we can load XCLIPTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = XCLIPTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/xclip-base-patch32"
model = XCLIPModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on a spaghetti video
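# The .npy file holds 8 frames of the clip (per its filename); they are returned as a
# list of numpy arrays so the processor can treat them as a video.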
def prepare_video():
file = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_8_frames.npy", repo_type="dataset"
)
video = np.load(file)
return list(video)
@require_vision
@require_torch
class XCLIPModelIntegrationTest(unittest.TestCase):
@slow
def test_inference(self):
model_name = "microsoft/xclip-base-patch32"
model = XCLIPModel.from_pretrained(model_name).to(torch_device)
processor = XCLIPProcessor.from_pretrained(model_name)
video = prepare_video()
inputs = processor(
text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_video.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[14.0181, 20.2771, 14.4776]], device=torch_device)
torch.testing.assert_close(outputs.logits_per_video, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
# X-CLIP models accept an `interpolate_pos_encoding` argument in their forward method,
# which interpolates the pre-trained position embeddings so the model can be used on
# higher-resolution inputs. (The DINO model by Facebook AI uses the same mechanism to
# visualize self-attention on higher-resolution images.)
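# Rough idea (not the literal implementation): the position embedding has shape
# (1, 1 + num_patches, dim); the class-token entry is kept as-is while the patch grid
# is resized with torch.nn.functional.interpolate(..., mode="bicubic") to the new
# (height // patch_size, width // patch_size) grid and flattened back.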
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32").to(torch_device)
processor = XCLIPProcessor.from_pretrained(
"microsoft/xclip-base-patch32", size=180, crop_size={"height": 180, "width": 180}
)
video = prepare_video()
inputs = processor(text="what's in the video", videos=video, return_tensors="pt").to(torch_device)
# interpolate_pos_encoding=False should raise a ValueError, since the 180x180 inputs
# no longer match the pre-trained position embeddings
with self.assertRaises(ValueError, msg="doesn't match model"):
with torch.no_grad():
model(**inputs, interpolate_pos_encoding=False)
# forward pass
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
# verify the vision model's last hidden state
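# 8 frames, and with 180x180 crops and 32-pixel patches (per the checkpoint name)
# each frame yields (180 // 32) ** 2 + 1 = 26 tokens of width 768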
expected_shape = torch.Size((8, 26, 768))
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
expectations = Expectations(
{
(None, None): [[0.0126, 0.2109, 0.0609], [0.0448, 0.5862, -0.1688], [-0.0881, 0.8525, -0.3044]],
("cuda", 8): [[0.0126, 0.2109, 0.0609], [0.0448, 0.5862, -0.1688], [-0.0881, 0.8525, -0.3044]],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(
outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4
)
| transformers/tests/models/x_clip/test_modeling_x_clip.py/0 | {
"file_path": "transformers/tests/models/x_clip/test_modeling_x_clip.py",
"repo_id": "transformers",
"token_count": 13360
} | 601 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ZoeDepth model."""
import unittest
import numpy as np
from transformers import Dinov2Config, ZoeDepthConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils.import_utils import get_torch_major_and_minor_version
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ZoeDepthForDepthEstimation
if is_vision_available():
from PIL import Image
from transformers import ZoeDepthImageProcessor
class ZoeDepthModelTester:
def __init__(
self,
parent,
batch_size=2,
num_channels=3,
image_size=32,
patch_size=16,
use_labels=True,
num_labels=3,
is_training=True,
hidden_size=4,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=8,
out_features=["stage1", "stage2"],
apply_layernorm=False,
reshape_hidden_states=False,
neck_hidden_sizes=[2, 2],
fusion_hidden_size=6,
bottleneck_features=6,
num_out_features=[6, 6, 6, 6],
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.out_features = out_features
self.apply_layernorm = apply_layernorm
self.reshape_hidden_states = reshape_hidden_states
self.use_labels = use_labels
self.num_labels = num_labels
self.is_training = is_training
self.neck_hidden_sizes = neck_hidden_sizes
self.fusion_hidden_size = fusion_hidden_size
self.bottleneck_features = bottleneck_features
self.num_out_features = num_out_features
# ZoeDepth's sequence length
self.seq_length = (self.image_size // self.patch_size) ** 2 + 1
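# with the defaults above (image_size=32, patch_size=16): (32 // 16) ** 2 + 1 = 5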
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ZoeDepthConfig(
backbone_config=self.get_backbone_config(),
backbone=None,
neck_hidden_sizes=self.neck_hidden_sizes,
fusion_hidden_size=self.fusion_hidden_size,
bottleneck_features=self.bottleneck_features,
num_out_features=self.num_out_features,
)
def get_backbone_config(self):
return Dinov2Config(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
is_training=self.is_training,
out_features=self.out_features,
reshape_hidden_states=self.reshape_hidden_states,
)
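# ZoeDepth regresses a dense depth map, so `predicted_depth` is expected to come back
# at the full input resolution: (batch_size, image_size, image_size).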
def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = ZoeDepthForDepthEstimation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ZoeDepthModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ZoeDepth does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (ZoeDepthForDepthEstimation,) if is_torch_available() else ()
pipeline_model_mapping = {"depth-estimation": ZoeDepthForDepthEstimation} if is_torch_available() else {}
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
# `strict=True/False` are both failing with torch 2.7, see #38677
test_torch_exportable = get_torch_major_and_minor_version() != "2.7"
def setUp(self):
self.model_tester = ZoeDepthModelTester(self)
self.config_tester = ConfigTester(
self, config_class=ZoeDepthConfig, has_text_modality=False, hidden_size=37, common_properties=[]
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model and hence no input_embeddings")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model and hence no input_embeddings")
def test_model_get_set_embeddings(self):
pass
def test_for_depth_estimation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)
@unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model and hence no input_embeddings")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="ZoeDepth does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="ZoeDepth does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="ZoeDepth does not support training yet")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="ZoeDepth does not support training yet")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "Intel/zoedepth-nyu"
model = ZoeDepthForDepthEstimation.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
@slow
class ZoeDepthModelIntegrationTest(unittest.TestCase):
expected_slice_post_processing = {
(False, False): [
[[1.1348238, 1.1193453, 1.130562], [1.1754476, 1.1613507, 1.1701596], [1.2287744, 1.2101802, 1.2148322]],
[[2.7170, 2.6550, 2.6839], [2.9827, 2.9438, 2.9587], [3.2340, 3.1817, 3.1602]],
],
(False, True): [
[[1.0610938, 1.1042216, 1.1429265], [1.1099341, 1.148696, 1.1817775], [1.1656011, 1.1988826, 1.2268101]],
[[2.5848, 2.7391, 2.8694], [2.7882, 2.9872, 3.1244], [2.9436, 3.1812, 3.3188]],
],
(True, False): [
[[1.8382794, 1.8380532, 1.8375976], [1.848761, 1.8485023, 1.8479986], [1.8571457, 1.8568444, 1.8562847]],
[[6.2030, 6.1902, 6.1777], [6.2303, 6.2176, 6.2053], [6.2561, 6.2436, 6.2312]],
],
(True, True): [
[[1.8306141, 1.8305621, 1.8303483], [1.8410318, 1.8409299, 1.8406585], [1.8492792, 1.8491366, 1.8488203]],
[[6.2616, 6.2520, 6.2435], [6.2845, 6.2751, 6.2667], [6.3065, 6.2972, 6.2887]],
],
}  # keyed by (pad_input, flip_aug), matching the flags of check_post_processing_test below
def test_inference_depth_estimation(self):
image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu")
model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 384, 512))
self.assertEqual(predicted_depth.shape, expected_shape)
expected_slice = torch.tensor(
[[1.0020, 1.0219, 1.0389], [1.0349, 1.0816, 1.1000], [1.0576, 1.1094, 1.1249]],
).to(torch_device)
torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_depth_estimation_multiple_heads(self):
image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti")
model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 384, 512))
self.assertEqual(predicted_depth.shape, expected_shape)
expected_slice = torch.tensor(
[[1.1571, 1.1438, 1.1783], [1.2163, 1.2036, 1.2320], [1.2688, 1.2461, 1.2734]],
).to(torch_device)
torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
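# Helper: re-runs post-processing with `target_sizes` set to twice each image's resolution,
# then bicubically downsamples the enlarged map back and checks it stays close (2e-2) to
# the depth map produced at the original resolution.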
def check_target_size(
self,
image_processor,
pad_input,
images,
outputs,
raw_outputs,
raw_outputs_flipped=None,
):
outputs_large = image_processor.post_process_depth_estimation(
raw_outputs,
[img.size[::-1] for img in images],
outputs_flipped=raw_outputs_flipped,
target_sizes=[tuple(np.array(img.size[::-1]) * 2) for img in images],
do_remove_padding=pad_input,
)
for img, out, out_l in zip(images, outputs, outputs_large):
out = out["predicted_depth"]
out_l = out_l["predicted_depth"]
out_l_reduced = torch.nn.functional.interpolate(
out_l.unsqueeze(0).unsqueeze(1), size=img.size[::-1], mode="bicubic", align_corners=False
)
out_l_reduced = out_l_reduced.squeeze(0).squeeze(0)
torch.testing.assert_close(out, out_l_reduced, rtol=2e-2, atol=2e-2)
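# Helper: runs the model with/without input padding and with optional flip augmentation
# (a second pass on images flipped along the width dimension), post-processes both, and
# compares the resulting depth maps against `expected_slice_post_processing`, followed by
# the target-size resizing check above.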
def check_post_processing_test(self, image_processor, images, model, pad_input=True, flip_aug=True):
inputs = image_processor(images=images, return_tensors="pt", do_pad=pad_input).to(torch_device)
with torch.no_grad():
raw_outputs = model(**inputs)
raw_outputs_flipped = None
if flip_aug:
raw_outputs_flipped = model(pixel_values=torch.flip(inputs.pixel_values, dims=[3]))
outputs = image_processor.post_process_depth_estimation(
raw_outputs,
[img.size[::-1] for img in images],
outputs_flipped=raw_outputs_flipped,
do_remove_padding=pad_input,
)
expected_slices = torch.tensor(self.expected_slice_post_processing[pad_input, flip_aug]).to(torch_device)
for img, out, expected_slice in zip(images, outputs, expected_slices):
out = out["predicted_depth"]
self.assertTrue(img.size == out.shape[::-1])
torch.testing.assert_close(expected_slice, out[:3, :3], rtol=1e-3, atol=1e-3)
self.check_target_size(image_processor, pad_input, images, outputs, raw_outputs, raw_outputs_flipped)
def test_inference_depth_estimation_post_processing_nopad_noflip(self):
images = [prepare_img(), Image.open("./tests/fixtures/tests_samples/COCO/000000004016.png")]
image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti", keep_aspect_ratio=False)
model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti").to(torch_device)
self.check_post_processing_test(image_processor, images, model, pad_input=False, flip_aug=False)
def test_inference_depth_estimation_post_processing_nopad_flip(self):
images = [prepare_img(), Image.open("./tests/fixtures/tests_samples/COCO/000000004016.png")]
image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti", keep_aspect_ratio=False)
model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti").to(torch_device)
self.check_post_processing_test(image_processor, images, model, pad_input=False, flip_aug=True)
def test_inference_depth_estimation_post_processing_pad_noflip(self):
images = [prepare_img(), Image.open("./tests/fixtures/tests_samples/COCO/000000004016.png")]
image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti", keep_aspect_ratio=False)
model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti").to(torch_device)
self.check_post_processing_test(image_processor, images, model, pad_input=True, flip_aug=False)
def test_inference_depth_estimation_post_processing_pad_flip(self):
images = [prepare_img(), Image.open("./tests/fixtures/tests_samples/COCO/000000004016.png")]
image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti", keep_aspect_ratio=False)
model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti").to(torch_device)
self.check_post_processing_test(image_processor, images, model, pad_input=True, flip_aug=True)
| transformers/tests/models/zoedepth/test_modeling_zoedepth.py/0 | {
"file_path": "transformers/tests/models/zoedepth/test_modeling_zoedepth.py",
"repo_id": "transformers",
"token_count": 6362
} | 602 |