Dataset columns:
- repo: string (length 7-90)
- file_url: string (length 81-315)
- file_path: string (length 4-228)
- content: string (length 0-32.8k)
- language: string (1 class)
- license: string (7 classes)
- commit_sha: string (length 40)
- retrieved_at: date (2026-01-04 14:38:15 to 2026-01-05 02:33:18)
- truncated: bool (2 classes)
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/colpali/convert_colpali_weights_to_hf.py
src/transformers/models/colpali/convert_colpali_weights_to_hf.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert ColPali weights from the original repository to the HF model format. Original repository: https://github.com/illuin-tech/colpali. NOTE: This script was originally run using `torch==2.5.1` and with: ```bash python src/transformers/models/colpali/convert_colpali_weights_to_hf.py \ --model_id vidore/colpali-v1.2-merged \ --revision 89fd9736194236a1ecb7a9ec9b04f537f6f896af \ --original_vlm_name_or_path google/paligemma-3b-mix-448 \ --output_dir vidore/colpali-v1.2-hf-internal \ --push_to_hub python src/transformers/models/colpali/convert_colpali_weights_to_hf.py \ --model_id vidore/colpali-v1.3-merged \ --revision 5b955e3415a7c5468ab33119d98d6d45c3a5b2c3 \ --original_vlm_name_or_path google/paligemma-3b-mix-448 \ --output_dir vidore/colpali-v1.3-hf \ --push_to_hub ``` """ import argparse import glob from pathlib import Path from typing import Any, Optional import torch from huggingface_hub import snapshot_download from safetensors import safe_open from transformers import AutoConfig from transformers.models.colpali import ColPaliForRetrieval from transformers.models.colpali.configuration_colpali import ColPaliConfig from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) ORIGINAL_DTYPE = torch.bfloat16 def rename_state_dict_keys(state_dict: dict[str, Any]) -> dict[str, Any]: new_state_dict = {} for key, value in state_dict.items(): new_key = key if key.startswith("custom_text_proj"): new_key = key.replace("custom_text_proj", "embedding_proj_layer") if key.startswith("model."): new_key = key.replace("model.", "vlm.", 1) new_state_dict[new_key] = value return new_state_dict def load_original_state_dict(model_id: str, revision: Optional[str] = None) -> dict[str, torch.Tensor]: directory_path = snapshot_download( repo_id=model_id, revision=revision, allow_patterns=["*.safetensors"], ) original_state_dict = {} for path in glob.glob(f"{directory_path}/*"): if path.endswith(".safetensors"): with safe_open(path, framework="pt", device="cpu") as f: for key in f.keys(): original_state_dict[key] = f.get_tensor(key) # Some weights are tied, so `lm.head`` is not saved. Let's clone to load state dict. 
if "lm_head.weight" not in original_state_dict: original_state_dict["vlm.language_model.lm_head.weight"] = original_state_dict[ "model.language_model.model.embed_tokens.weight" ].clone() return original_state_dict @torch.no_grad() def convert_colpali_weights_to_hf( model_id: str, output_dir: str, push_to_hub: bool, revision: Optional[str] = None, original_vlm_name_or_path: Optional[str] = None, ): # Load the original model data original_config = AutoConfig.from_pretrained( model_id, revision=revision, ) if original_vlm_name_or_path is not None: original_config._name_or_path = original_vlm_name_or_path if hasattr(original_config, "architectures"): delattr(original_config, "architectures") original_state_dict = load_original_state_dict(model_id, revision=revision) # Format the state_dict keys original_state_dict = rename_state_dict_keys(original_state_dict) # Create the new config config = ColPaliConfig( vlm_config=original_config, embedding_dim=128, # hardcoded in the original model ) config.model_type = "colpali" config.is_composition = False # Load the untrained model model = ColPaliForRetrieval(config=config).to("cpu").eval() print("Created model with new config and randomly initialized weights") # NOTE: The model was initialized with float32 weights. We need to convert it to the desired precision. # There are two ways to set the model's dtype: # - Using `model.from_pretrained(..., dtype=dtype_precision)` doesn't convert the hyperparameters to the desired precision. # - Using `model.to(dtype_precision)` converts all values - including the hyperparameters - to the desired precision. # The following snippet allows a fine-grained control over the model's dtype, making sure that all # the new weights' dtypes match the original model. for param in model.parameters(): param.data = param.data.to(ORIGINAL_DTYPE) print(f"Converted the new model weights to `{ORIGINAL_DTYPE}`") # Load the original weights model.load_state_dict(original_state_dict) print("Loaded original model weights") # Tie the weights (following ColPali's `__init__`` step) if model.vlm.language_model._tied_weights_keys is not None: prefix = "vlm.language_model." prefixed_mapping = { f"{prefix}{target}": f"{prefix}{source}" for target, source in model.vlm.language_model._tied_weights_keys.items() } if isinstance(model._tied_weights_keys, dict): model._tied_weights_keys.update(prefixed_mapping) else: model._tied_weights_keys = prefixed_mapping # Sanity check: ensure all keys are the same state_dict_keys_old = set(original_state_dict.keys()) state_dict_keys_new = set(model.state_dict().keys()) disjoint_keys = state_dict_keys_old.symmetric_difference(state_dict_keys_new) if disjoint_keys: raise ValueError(f"Incompatible keys: {disjoint_keys}") # Save the model if push_to_hub: model.push_to_hub(output_dir, private=True) print(f"Model pushed to the hub at `{output_dir}`") else: Path(output_dir).mkdir(exist_ok=True, parents=True) model.save_pretrained(output_dir) print(f"Model saved to `{output_dir}`") if __name__ == "__main__": parser = argparse.ArgumentParser( description=""" This script converts the original ColPali model to the HF model format. 
Example usage: ```bash python src/transformers/models/colpali/convert_colpali_weights_to_hf.py \ --model_id vidore/colpali-v1.2-merged \ --revision 89fd9736194236a1ecb7a9ec9b04f537f6f896af \ --original_vlm_name_or_path google/paligemma-3b-mix-448 \ --output_dir vidore/colpali-v1.2-hf \ --push_to_hub ``` """ ) parser.add_argument( "--model_id", help="Model ID of the original model to convert", ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", ) parser.add_argument( "--push_to_hub", help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally", action="store_true", default=False, ) parser.add_argument( "--revision", help="Revision of the model to download", default=None, ) parser.add_argument( "--original_vlm_name_or_path", help="Name or path of the original VLM backbone model", default=None, ) args = parser.parse_args() convert_colpali_weights_to_hf( model_id=args.model_id, output_dir=args.output_dir, push_to_hub=args.push_to_hub, revision=args.revision, original_vlm_name_or_path=args.original_vlm_name_or_path, )
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
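The conversion script in the record above is normally run from the command line, as shown in its own docstring. As a rough sketch, the same conversion can also be driven programmatically by calling `convert_colpali_weights_to_hf` directly; the model ID, revision, and backbone name below are copied from the script's docstring, while the local output directory is a hypothetical path.

```python
# Sketch: invoking the ColPali weight conversion programmatically instead of via the CLI.
# Model ID, revision, and VLM backbone come from the script's own docstring;
# "./colpali-v1.2-hf" is a hypothetical local output directory.
from transformers.models.colpali.convert_colpali_weights_to_hf import convert_colpali_weights_to_hf

convert_colpali_weights_to_hf(
    model_id="vidore/colpali-v1.2-merged",
    output_dir="./colpali-v1.2-hf",  # save locally instead of pushing to the Hub
    push_to_hub=False,
    revision="89fd9736194236a1ecb7a9ec9b04f537f6f896af",
    original_vlm_name_or_path="google/paligemma-3b-mix-448",
)
```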
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/colpali/__init__.py
src/transformers/models/colpali/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_colpali import * from .modeling_colpali import * from .processing_colpali import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/colpali/processing_colpali.py
src/transformers/models/colpali/processing_colpali.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/colpali/modular_colpali.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_colpali.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput, make_flat_list_of_images from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import AddedToken, PreTokenizedInput, TextInput from ...utils import is_torch_available if is_torch_available(): import torch class ColPaliProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "padding": "longest", }, "images_kwargs": { "data_format": "channels_first", "do_convert_rgb": True, }, "common_kwargs": {"return_tensors": "pt"}, } IMAGE_TOKEN = "<image>" EXTRA_TOKENS = [f"<loc{i:0>4}>" for i in range(1024)] + [f"<seg{i:0>3}>" for i in range(128)] def build_string_from_input(prompt, bos_token, image_seq_len, image_token, num_images): """ Builds a string from the input prompt and image tokens. For example, for the call: build_string_from_input( prompt="Prefix str" bos_token="<s>", image_seq_len=3, image_token="<im>", ) The output will be: "<im><im><im><s>Initial str" Args: prompt (`list[Union[str, ImageInput]]`): The input prompt. bos_token (`str`): The beginning of sentence token. image_seq_len (`int`): The length of the image sequence. image_token (`str`): The image token. num_images (`int`): Number of images in the prompt. """ return f"{image_token * image_seq_len * num_images}{bos_token}{prompt}\n" class ColPaliProcessor(ProcessorMixin): r""" Constructs a ColPali processor which wraps a PaliGemmaProcessor and special methods to process images and queries, as well as to compute the late-interaction retrieval score. [`ColPaliProcessor`] offers all the functionalities of [`PaliGemmaProcessor`]. See the [`~PaliGemmaProcessor.__call__`] for more information. Args: image_processor ([`SiglipImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. visual_prompt_prefix (`str`, *optional*, defaults to `"Describe the image."`): A string that gets tokenized and prepended to the image tokens. query_prefix (`str`, *optional*, defaults to `"Question: "`): A prefix to be used for the query. 
""" def __init__( self, image_processor=None, tokenizer=None, chat_template=None, visual_prompt_prefix: str = "Describe the image.", query_prefix: str = "Question: ", ): self.visual_prompt_prefix = visual_prompt_prefix self.query_prefix = query_prefix if not hasattr(image_processor, "image_seq_length"): raise ValueError("Image processor is missing an `image_seq_length` attribute.") self.image_seq_length = image_processor.image_seq_length if not hasattr(tokenizer, "image_token"): image_token = AddedToken(IMAGE_TOKEN, normalized=False, special=True) tokens_to_add = {"additional_special_tokens": [image_token]} tokenizer.add_special_tokens(tokens_to_add) self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) self.image_token = IMAGE_TOKEN else: self.image_token_id = tokenizer.image_token_id self.image_token = tokenizer.image_token tokenizer.add_tokens(EXTRA_TOKENS) tokenizer.add_bos_token = False tokenizer.add_eos_token = False super().__init__(image_processor, tokenizer, chat_template=chat_template) def __call__( self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, **kwargs: Unpack[ColPaliProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model either (1) one or several texts, either (2) one or several image(s). This method is a custom wrapper around the PaliGemmaProcessor's [`~PaliGemmaProcessor.__call__`] method adapted for the ColPali model. It cannot process both text and images at the same time. When preparing the text(s), this method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`]. When preparing the image(s), this method forwards the `images` and `kwargs` arguments to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`]. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. 
""" output_kwargs = self._merge_kwargs( ColPaliProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) suffix = output_kwargs["text_kwargs"].pop("suffix", None) return_token_type_ids = True if text is None and images is None: raise ValueError("Either text or images must be provided") if text is not None and images is not None: raise ValueError("Only one of text or images can be processed at a time") if images is not None: images = self.image_processor.fetch_images(images) images = make_flat_list_of_images(images) texts_doc = [self.visual_prompt_prefix] * len(images) images = [image.convert("RGB") for image in images] input_strings = [ build_string_from_input( prompt=prompt, bos_token=self.tokenizer.bos_token, image_seq_len=self.image_seq_length, image_token=IMAGE_TOKEN, num_images=len(image_list) if isinstance(image_list, list) else 1, ) for prompt, image_list in zip(texts_doc, images) ] pixel_values = self.image_processor(images, **output_kwargs["images_kwargs"])["pixel_values"] # max_length has to account for the image tokens if output_kwargs["text_kwargs"].get("max_length", None) is not None: output_kwargs["text_kwargs"]["max_length"] += self.image_seq_length inputs = self.tokenizer( input_strings, return_token_type_ids=return_token_type_ids, **output_kwargs["text_kwargs"], ) return_data = {**inputs, "pixel_values": pixel_values} if return_token_type_ids: labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100) return_data.update({"labels": labels}) return BatchFeature(data=return_data) elif text is not None: if isinstance(text, str): text = [text] elif not (isinstance(text, list) and isinstance(text[0], str)): raise ValueError("Text must be a string or a list of strings") if suffix is None: suffix = self.query_augmentation_token * 10 texts_query: list[str] = [] for query in text: query = self.tokenizer.bos_token + self.query_prefix + query + suffix + "\n" texts_query.append(query) output_kwargs["text_kwargs"]["max_length"] = output_kwargs["text_kwargs"].get("max_length", 50) batch_query = self.tokenizer( texts_query, return_token_type_ids=return_token_type_ids, **output_kwargs["text_kwargs"], ) return batch_query def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: num_image_tokens = [self.image_seq_length] * len(image_sizes) num_image_patches = [1] * len(image_sizes) vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches}) return MultiModalData(**vision_data) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names + ["token_type_ids", "labels"] image_processor_input_names = self.image_processor.model_input_names return list(tokenizer_input_names + image_processor_input_names) @property def query_augmentation_token(self) -> str: """ Return the query augmentation token. Query augmentation buffers are used as reasoning buffers during inference. """ return self.tokenizer.pad_token def process_images( self, images: Optional[ImageInput] = None, **kwargs: Unpack[ColPaliProcessorKwargs], ) -> BatchFeature: """ Prepare for the model one or several image(s). 
This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `images` and `kwargs` arguments to the image processor. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ return self.__call__(images=images, **kwargs) def process_queries( self, text: Union[TextInput, list[TextInput]], **kwargs: Unpack[ColPaliProcessorKwargs], ) -> BatchFeature: """ Prepare for the model one or several texts. This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `text` and `kwargs` arguments to the tokenizer. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). """ return self.__call__(text=text, **kwargs) def score_retrieval( self, query_embeddings: Union["torch.Tensor", list["torch.Tensor"]], passage_embeddings: Union["torch.Tensor", list["torch.Tensor"]], batch_size: int = 128, output_dtype: Optional["torch.dtype"] = None, output_device: Union["torch.device", str] = "cpu", ) -> "torch.Tensor": """ Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector query embeddings (`qs`) and passage embeddings (`ps`). For ColPali, a passage is the image of a document page. Because the embedding tensors are multi-vector and can thus have different shapes, they should be fed as: (1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim) (2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually obtained by padding the list of tensors. Args: query_embeddings (`Union[torch.Tensor, list[torch.Tensor]`): Query embeddings. 
passage_embeddings (`Union[torch.Tensor, list[torch.Tensor]`): Passage embeddings. batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores. output_dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): The dtype of the output tensor. If `None`, the dtype of the input embeddings is used. output_device (`torch.device` or `str`, *optional*, defaults to "cpu"): The device of the output tensor. Returns: `torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score tensor is saved on the "cpu" device. """ if len(query_embeddings) == 0: raise ValueError("No queries provided") if len(passage_embeddings) == 0: raise ValueError("No passages provided") if query_embeddings[0].device != passage_embeddings[0].device: raise ValueError("Queries and passages must be on the same device") if query_embeddings[0].dtype != passage_embeddings[0].dtype: raise ValueError("Queries and passages must have the same dtype") if output_dtype is None: output_dtype = query_embeddings[0].dtype scores: list[torch.Tensor] = [] for i in range(0, len(query_embeddings), batch_size): batch_scores: list[torch.Tensor] = [] batch_queries = torch.nn.utils.rnn.pad_sequence( query_embeddings[i : i + batch_size], batch_first=True, padding_value=0 ) for j in range(0, len(passage_embeddings), batch_size): batch_passages = torch.nn.utils.rnn.pad_sequence( passage_embeddings[j : j + batch_size], batch_first=True, padding_value=0 ) batch_scores.append( torch.einsum("bnd,csd->bcns", batch_queries, batch_passages).max(dim=3)[0].sum(dim=2) ) scores.append(torch.cat(batch_scores, dim=1).to(output_dtype).to(output_device)) return torch.cat(scores, dim=0) __all__ = ["ColPaliProcessor"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
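The processor record above documents `process_images`, `process_queries`, and the late-interaction `score_retrieval` method, so a minimal end-to-end retrieval sketch could look like the following. The checkpoint name comes from the conversion script's docstring, and the assumption that the retrieval model's output exposes an `embeddings` field is based on `ColPaliForRetrieval` as referenced there rather than on code shown in this record; treat it as an untested sketch.

```python
# Sketch: ColBERT-style retrieval with ColPaliProcessor (hedged, untested).
import torch
from PIL import Image
from transformers import ColPaliForRetrieval, ColPaliProcessor

model_name = "vidore/colpali-v1.2-hf"  # checkpoint name taken from the conversion docstring
model = ColPaliForRetrieval.from_pretrained(model_name, dtype=torch.bfloat16).eval()
processor = ColPaliProcessor.from_pretrained(model_name)

images = [Image.new("RGB", (448, 448), color="white")]  # placeholder document pages
queries = ["What is the total revenue for 2023?"]

batch_images = processor.process_images(images=images)
batch_queries = processor.process_queries(text=queries)

with torch.no_grad():
    image_embeddings = model(**batch_images).embeddings  # assumed output field
    query_embeddings = model(**batch_queries).embeddings

# MaxSim scores of shape (n_queries, n_passages), computed by score_retrieval above.
scores = processor.score_retrieval(query_embeddings, image_embeddings)
print(scores)
```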
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mra/modeling_mra.py
src/transformers/models/mra/modeling_mra.py
# coding=utf-8 # Copyright 2023 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MRA model.""" import math from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ... import initialization as init from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( auto_docstring, is_cuda_platform, is_kernels_available, is_ninja_available, is_torch_cuda_available, logging, ) from .configuration_mra import MraConfig logger = logging.get_logger(__name__) mra_cuda_kernel = None def load_cuda_kernels(): global mra_cuda_kernel if not is_kernels_available(): raise ImportError("kernels is not installed, please install it with `pip install kernels`") from ...integrations.hub_kernels import get_kernel mra_cuda_kernel = get_kernel("kernels-community/mra") def sparse_max(sparse_qk_prod, indices, query_num_block, key_num_block): """ Computes maximum values for softmax stability. """ if len(sparse_qk_prod.size()) != 4: raise ValueError("sparse_qk_prod must be a 4-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") if sparse_qk_prod.size(2) != 32: raise ValueError("The size of the second dimension of sparse_qk_prod must be 32.") if sparse_qk_prod.size(3) != 32: raise ValueError("The size of the third dimension of sparse_qk_prod must be 32.") index_vals = sparse_qk_prod.max(dim=-2).values.transpose(-1, -2) index_vals = index_vals.contiguous() indices = indices.int() indices = indices.contiguous() max_vals, max_vals_scatter = mra_cuda_kernel.index_max(index_vals, indices, query_num_block, key_num_block) max_vals_scatter = max_vals_scatter.transpose(-1, -2)[:, :, None, :] return max_vals, max_vals_scatter def sparse_mask(mask, indices, block_size=32): """ Converts attention mask to a sparse mask for high resolution logits. """ if len(mask.size()) != 2: raise ValueError("mask must be a 2-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") if mask.shape[0] != indices.shape[0]: raise ValueError("mask and indices must have the same size in the zero-th dimension.") batch_size, seq_len = mask.shape num_block = seq_len // block_size batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device) mask = mask.reshape(batch_size, num_block, block_size) mask = mask[batch_idx[:, None], (indices % num_block).long(), :] return mask def mm_to_sparse(dense_query, dense_key, indices, block_size=32): """ Performs Sampled Dense Matrix Multiplication. 
""" batch_size, query_size, dim = dense_query.size() _, key_size, dim = dense_key.size() if query_size % block_size != 0: raise ValueError("query_size (size of first dimension of dense_query) must be divisible by block_size.") if key_size % block_size != 0: raise ValueError("key_size (size of first dimension of dense_key) must be divisible by block_size.") dense_query = dense_query.reshape(batch_size, query_size // block_size, block_size, dim).transpose(-1, -2) dense_key = dense_key.reshape(batch_size, key_size // block_size, block_size, dim).transpose(-1, -2) if len(dense_query.size()) != 4: raise ValueError("dense_query must be a 4-dimensional tensor.") if len(dense_key.size()) != 4: raise ValueError("dense_key must be a 4-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") if dense_query.size(3) != 32: raise ValueError("The third dimension of dense_query must be 32.") if dense_key.size(3) != 32: raise ValueError("The third dimension of dense_key must be 32.") dense_query = dense_query.contiguous() dense_key = dense_key.contiguous() indices = indices.int() indices = indices.contiguous() return mra_cuda_kernel.mm_to_sparse(dense_query, dense_key, indices.int()) def sparse_dense_mm(sparse_query, indices, dense_key, query_num_block, block_size=32): """ Performs matrix multiplication of a sparse matrix with a dense matrix. """ batch_size, key_size, dim = dense_key.size() if key_size % block_size != 0: raise ValueError("key_size (size of first dimension of dense_key) must be divisible by block_size.") if sparse_query.size(2) != block_size: raise ValueError("The size of the second dimension of sparse_query must be equal to the block_size.") if sparse_query.size(3) != block_size: raise ValueError("The size of the third dimension of sparse_query must be equal to the block_size.") dense_key = dense_key.reshape(batch_size, key_size // block_size, block_size, dim).transpose(-1, -2) if len(sparse_query.size()) != 4: raise ValueError("sparse_query must be a 4-dimensional tensor.") if len(dense_key.size()) != 4: raise ValueError("dense_key must be a 4-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") if dense_key.size(3) != 32: raise ValueError("The size of the third dimension of dense_key must be 32.") sparse_query = sparse_query.contiguous() indices = indices.int() indices = indices.contiguous() dense_key = dense_key.contiguous() dense_qk_prod = mra_cuda_kernel.sparse_dense_mm(sparse_query, indices, dense_key, query_num_block) dense_qk_prod = dense_qk_prod.transpose(-1, -2).reshape(batch_size, query_num_block * block_size, dim) return dense_qk_prod def transpose_indices(indices, dim_1_block, dim_2_block): return ((indices % dim_2_block) * dim_1_block + torch.div(indices, dim_2_block, rounding_mode="floor")).long() class MraSampledDenseMatMul(torch.autograd.Function): @staticmethod def forward(ctx, dense_query, dense_key, indices, block_size): sparse_qk_prod = mm_to_sparse(dense_query, dense_key, indices, block_size) ctx.save_for_backward(dense_query, dense_key, indices) ctx.block_size = block_size return sparse_qk_prod @staticmethod def backward(ctx, grad): dense_query, dense_key, indices = ctx.saved_tensors block_size = ctx.block_size query_num_block = dense_query.size(1) // block_size key_num_block = dense_key.size(1) // block_size indices_T = transpose_indices(indices, query_num_block, key_num_block) grad_key = sparse_dense_mm(grad.transpose(-1, -2), indices_T, dense_query, 
key_num_block) grad_query = sparse_dense_mm(grad, indices, dense_key, query_num_block) return grad_query, grad_key, None, None @staticmethod def operator_call(dense_query, dense_key, indices, block_size=32): return MraSampledDenseMatMul.apply(dense_query, dense_key, indices, block_size) class MraSparseDenseMatMul(torch.autograd.Function): @staticmethod def forward(ctx, sparse_query, indices, dense_key, query_num_block): sparse_qk_prod = sparse_dense_mm(sparse_query, indices, dense_key, query_num_block) ctx.save_for_backward(sparse_query, indices, dense_key) ctx.query_num_block = query_num_block return sparse_qk_prod @staticmethod def backward(ctx, grad): sparse_query, indices, dense_key = ctx.saved_tensors query_num_block = ctx.query_num_block key_num_block = dense_key.size(1) // sparse_query.size(-1) indices_T = transpose_indices(indices, query_num_block, key_num_block) grad_key = sparse_dense_mm(sparse_query.transpose(-1, -2), indices_T, grad, key_num_block) grad_query = mm_to_sparse(grad, dense_key, indices) return grad_query, None, grad_key, None @staticmethod def operator_call(sparse_query, indices, dense_key, query_num_block): return MraSparseDenseMatMul.apply(sparse_query, indices, dense_key, query_num_block) class MraReduceSum: @staticmethod def operator_call(sparse_query, indices, query_num_block, key_num_block): batch_size, num_block, block_size, _ = sparse_query.size() if len(sparse_query.size()) != 4: raise ValueError("sparse_query must be a 4-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") _, _, block_size, _ = sparse_query.size() batch_size, num_block = indices.size() sparse_query = sparse_query.sum(dim=2).reshape(batch_size * num_block, block_size) batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device) global_idxes = ( torch.div(indices, key_num_block, rounding_mode="floor").long() + batch_idx[:, None] * query_num_block ).reshape(batch_size * num_block) temp = torch.zeros( (batch_size * query_num_block, block_size), dtype=sparse_query.dtype, device=sparse_query.device ) output = temp.index_add(0, global_idxes, sparse_query).reshape(batch_size, query_num_block, block_size) output = output.reshape(batch_size, query_num_block * block_size) return output def get_low_resolution_logit(query, key, block_size, mask=None, value=None): """ Compute low resolution approximation. 
""" batch_size, seq_len, head_dim = query.size() num_block_per_row = seq_len // block_size value_hat = None if mask is not None: token_count = mask.reshape(batch_size, num_block_per_row, block_size).sum(dim=-1) query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / ( token_count[:, :, None] + 1e-6 ) key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / ( token_count[:, :, None] + 1e-6 ) if value is not None: value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / ( token_count[:, :, None] + 1e-6 ) else: token_count = block_size * torch.ones(batch_size, num_block_per_row, dtype=torch.float, device=query.device) query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2) key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2) if value is not None: value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2) low_resolution_logit = torch.matmul(query_hat, key_hat.transpose(-1, -2)) / math.sqrt(head_dim) low_resolution_logit_row_max = low_resolution_logit.max(dim=-1, keepdims=True).values if mask is not None: low_resolution_logit = ( low_resolution_logit - 1e4 * ((token_count[:, None, :] * token_count[:, :, None]) < 0.5).float() ) return low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat def get_block_idxes( low_resolution_logit, num_blocks, approx_mode, initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks ): """ Compute the indices of the subset of components to be used in the approximation. """ batch_size, total_blocks_per_row, _ = low_resolution_logit.shape if initial_prior_diagonal_n_blocks > 0: offset = initial_prior_diagonal_n_blocks // 2 temp_mask = torch.ones(total_blocks_per_row, total_blocks_per_row, device=low_resolution_logit.device) diagonal_mask = torch.tril(torch.triu(temp_mask, diagonal=-offset), diagonal=offset) low_resolution_logit = low_resolution_logit + diagonal_mask[None, :, :] * 5e3 if initial_prior_first_n_blocks > 0: low_resolution_logit[:, :initial_prior_first_n_blocks, :] = ( low_resolution_logit[:, :initial_prior_first_n_blocks, :] + 5e3 ) low_resolution_logit[:, :, :initial_prior_first_n_blocks] = ( low_resolution_logit[:, :, :initial_prior_first_n_blocks] + 5e3 ) top_k_vals = torch.topk( low_resolution_logit.reshape(batch_size, -1), num_blocks, dim=-1, largest=True, sorted=False ) indices = top_k_vals.indices if approx_mode == "full": threshold = top_k_vals.values.min(dim=-1).values high_resolution_mask = (low_resolution_logit >= threshold[:, None, None]).float() elif approx_mode == "sparse": high_resolution_mask = None else: raise ValueError(f"{approx_mode} is not a valid approx_model value.") return indices, high_resolution_mask def mra2_attention( query, key, value, mask, num_blocks, approx_mode, block_size=32, initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, ): """ Use Mra to approximate self-attention. 
""" if mra_cuda_kernel is None: return torch.zeros_like(query).requires_grad_() batch_size, num_head, seq_len, head_dim = query.size() meta_batch = batch_size * num_head if seq_len % block_size != 0: raise ValueError("sequence length must be divisible by the block_size.") num_block_per_row = seq_len // block_size query = query.reshape(meta_batch, seq_len, head_dim) key = key.reshape(meta_batch, seq_len, head_dim) value = value.reshape(meta_batch, seq_len, head_dim) if mask is not None: query = query * mask[:, :, None] key = key * mask[:, :, None] value = value * mask[:, :, None] if approx_mode == "full": low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat = get_low_resolution_logit( query, key, block_size, mask, value ) elif approx_mode == "sparse": with torch.no_grad(): low_resolution_logit, token_count, low_resolution_logit_row_max, _ = get_low_resolution_logit( query, key, block_size, mask ) else: raise Exception('approx_mode must be "full" or "sparse"') with torch.no_grad(): low_resolution_logit_normalized = low_resolution_logit - low_resolution_logit_row_max indices, high_resolution_mask = get_block_idxes( low_resolution_logit_normalized, num_blocks, approx_mode, initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks, ) high_resolution_logit = MraSampledDenseMatMul.operator_call( query, key, indices, block_size=block_size ) / math.sqrt(head_dim) max_vals, max_vals_scatter = sparse_max(high_resolution_logit, indices, num_block_per_row, num_block_per_row) high_resolution_logit = high_resolution_logit - max_vals_scatter if mask is not None: high_resolution_logit = high_resolution_logit - 1e4 * (1 - sparse_mask(mask, indices)[:, :, :, None]) high_resolution_attn = torch.exp(high_resolution_logit) high_resolution_attn_out = MraSparseDenseMatMul.operator_call( high_resolution_attn, indices, value, num_block_per_row ) high_resolution_normalizer = MraReduceSum.operator_call( high_resolution_attn, indices, num_block_per_row, num_block_per_row ) if approx_mode == "full": low_resolution_attn = ( torch.exp(low_resolution_logit - low_resolution_logit_row_max - 1e4 * high_resolution_mask) * token_count[:, None, :] ) low_resolution_attn_out = ( torch.matmul(low_resolution_attn, value_hat)[:, :, None, :] .repeat(1, 1, block_size, 1) .reshape(meta_batch, seq_len, head_dim) ) low_resolution_normalizer = ( low_resolution_attn.sum(dim=-1)[:, :, None].repeat(1, 1, block_size).reshape(meta_batch, seq_len) ) log_correction = low_resolution_logit_row_max.repeat(1, 1, block_size).reshape(meta_batch, seq_len) - max_vals if mask is not None: log_correction = log_correction * mask low_resolution_corr = torch.exp(log_correction * (log_correction <= 0).float()) low_resolution_attn_out = low_resolution_attn_out * low_resolution_corr[:, :, None] low_resolution_normalizer = low_resolution_normalizer * low_resolution_corr high_resolution_corr = torch.exp(-log_correction * (log_correction > 0).float()) high_resolution_attn_out = high_resolution_attn_out * high_resolution_corr[:, :, None] high_resolution_normalizer = high_resolution_normalizer * high_resolution_corr context_layer = (high_resolution_attn_out + low_resolution_attn_out) / ( high_resolution_normalizer[:, :, None] + low_resolution_normalizer[:, :, None] + 1e-6 ) elif approx_mode == "sparse": context_layer = high_resolution_attn_out / (high_resolution_normalizer[:, :, None] + 1e-6) else: raise Exception('config.approx_mode must be "full" or "sparse"') if mask is not None: context_layer = context_layer * mask[:, :, None] 
context_layer = context_layer.reshape(batch_size, num_head, seq_len, head_dim) return context_layer class MraEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MraSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) kernel_loaded = mra_cuda_kernel is not None if is_torch_cuda_available() and is_cuda_platform() and is_ninja_available() and not kernel_loaded: try: load_cuda_kernels() except Exception as e: logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}") self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.num_block = (config.max_position_embeddings // 32) * config.block_per_row self.num_block = min(self.num_block, int((config.max_position_embeddings // 32) ** 
2)) self.approx_mode = config.approx_mode self.initial_prior_first_n_blocks = config.initial_prior_first_n_blocks self.initial_prior_diagonal_n_blocks = config.initial_prior_diagonal_n_blocks def forward(self, hidden_states, attention_mask=None): batch_size, seq_len, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # revert changes made by get_extended_attention_mask attention_mask = 1.0 + attention_mask / 10000.0 attention_mask = ( attention_mask.squeeze() .repeat(1, self.num_attention_heads, 1) .reshape(batch_size * self.num_attention_heads, seq_len) .int() ) # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs # smaller than this are padded with zeros. gpu_warp_size = 32 if self.attention_head_size < gpu_warp_size: pad_size = batch_size, self.num_attention_heads, seq_len, gpu_warp_size - self.attention_head_size query_layer = torch.cat([query_layer, torch.zeros(pad_size, device=query_layer.device)], dim=-1) key_layer = torch.cat([key_layer, torch.zeros(pad_size, device=key_layer.device)], dim=-1) value_layer = torch.cat([value_layer, torch.zeros(pad_size, device=value_layer.device)], dim=-1) context_layer = mra2_attention( query_layer.float(), key_layer.float(), value_layer.float(), attention_mask.float(), self.num_block, approx_mode=self.approx_mode, initial_prior_first_n_blocks=self.initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks=self.initial_prior_diagonal_n_blocks, ) if self.attention_head_size < gpu_warp_size: context_layer = context_layer[:, :, :, : self.attention_head_size] context_layer = context_layer.reshape(batch_size, self.num_attention_heads, seq_len, self.attention_head_size) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class MraSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MraAttention(nn.Module): def __init__(self, config): super().__init__() self.self = MraSelfAttention(config) self.output = MraSelfOutput(config) def forward(self, hidden_states, attention_mask=None): self_outputs = self.self(hidden_states, attention_mask) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class MraIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) 
if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class MraOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MraLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = MraAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = MraIntermediate(config) self.output = MraOutput(config) def forward(self, hidden_states, attention_mask=None): self_attention_outputs = self.attention(hidden_states, attention_mask) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class MraEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([MraLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask) hidden_states = layer_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform class MraPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with 
Bert->Mra class MraLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = MraPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Mra class MraOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = MraLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores @auto_docstring # Copied from transformers.models.yoso.modeling_yoso.YosoPreTrainedModel with Yoso->Mra,yoso->mra class MraPreTrainedModel(PreTrainedModel): config: MraConfig base_model_prefix = "mra" supports_gradient_checkpointing = True @torch.no_grad() def _init_weights(self, module: nn.Module): """Initialize the weights""" super()._init_weights(module) if isinstance(module, MraLMPredictionHead): init.zeros_(module.bias) elif isinstance(module, MraEmbeddings): init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)) + 2) init.zeros_(module.token_type_ids) @auto_docstring class MraModel(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = MraEmbeddings(config) self.encoder = MraEncoder(config)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
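The MRA modeling record above is truncated, but the pieces shown (`MraEmbeddings`, `MraSelfAttention`, `MraEncoder`, `MraModel`) outline a standard encoder interface. A minimal sketch of exercising it, assuming the `uw-madison/mra-base-512-4` checkpoint mentioned in the MRA configuration record further down; note that, per `mra2_attention` above, the custom CUDA kernel is required for meaningful attention output, so on CPU this mostly illustrates the input/output interface.

```python
# Sketch: running MraModel on a single sentence (hedged, untested; assumes the
# uw-madison/mra-base-512-4 checkpoint referenced in the MRA configuration docstring).
import torch
from transformers import AutoTokenizer, MraModel

checkpoint = "uw-madison/mra-base-512-4"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = MraModel.from_pretrained(checkpoint).eval()

inputs = tokenizer("MRA approximates self-attention at multiple resolutions.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Hidden states come back as (batch_size, seq_len, hidden_size).
print(outputs.last_hidden_state.shape)
```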
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mra/__init__.py
src/transformers/models/mra/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_mra import * from .modeling_mra import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py
src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert MRA checkpoints from the original repository. URL: https://github.com/mlpen/mra-attention""" import argparse import torch from transformers import MraConfig, MraForMaskedLM def rename_key(orig_key): if "model" in orig_key: orig_key = orig_key.replace("model.", "") if "norm1" in orig_key: orig_key = orig_key.replace("norm1", "attention.output.LayerNorm") if "norm2" in orig_key: orig_key = orig_key.replace("norm2", "output.LayerNorm") if "norm" in orig_key: orig_key = orig_key.replace("norm", "LayerNorm") if "transformer" in orig_key: layer_num = orig_key.split(".")[0].split("_")[-1] orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}") if "mha.attn" in orig_key: orig_key = orig_key.replace("mha.attn", "attention.self") if "mha" in orig_key: orig_key = orig_key.replace("mha", "attention") if "W_q" in orig_key: orig_key = orig_key.replace("W_q", "self.query") if "W_k" in orig_key: orig_key = orig_key.replace("W_k", "self.key") if "W_v" in orig_key: orig_key = orig_key.replace("W_v", "self.value") if "ff.0" in orig_key: orig_key = orig_key.replace("ff.0", "intermediate.dense") if "ff.2" in orig_key: orig_key = orig_key.replace("ff.2", "output.dense") if "ff" in orig_key: orig_key = orig_key.replace("ff", "output.dense") if "mlm_class" in orig_key: orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder") if "mlm" in orig_key: orig_key = orig_key.replace("mlm", "cls.predictions.transform") if "backbone.backbone.encoders" in orig_key: orig_key = orig_key.replace("backbone.backbone.encoders", "encoder.layer") if "cls" not in orig_key: orig_key = "mra." + orig_key return orig_key def convert_checkpoint_helper(max_position_embeddings, orig_state_dict): for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if ("pooler" in key) or ("sen_class" in key): continue else: orig_state_dict[rename_key(key)] = val orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"] orig_state_dict["mra.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2 return orig_state_dict def convert_mra_checkpoint(checkpoint_path, mra_config_file, pytorch_dump_path): orig_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model_state_dict"] config = MraConfig.from_json_file(mra_config_file) model = MraForMaskedLM(config) new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict) print(model.load_state_dict(new_state_dict)) model.eval() model.save_pretrained(pytorch_dump_path) print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to Mra pytorch checkpoint." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="Path to the JSON configuration file for the MRA model.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_mra_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
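The conversion entry point above is driven entirely by `rename_key`, which rewrites each original MRA checkpoint key into the Hugging Face naming scheme (and drops `pooler`/`sen_class` weights along the way). A minimal sketch of that mapping; the input key and the paths in the comments are hypothetical examples, not real checkpoint contents:

```python
# Sketch only: the original key and all paths below are hypothetical.
from transformers.models.mra.convert_mra_pytorch_to_pytorch import rename_key

# Tracing the replacements above: strip "model.", map "transformer_0" to
# "encoder.layer.0", "mha" to "attention", "W_q" to "self.query", then prefix "mra.".
print(rename_key("model.transformer_0.mha.W_q.weight"))
# -> mra.encoder.layer.0.attention.self.query.weight

# The full conversion is run from the command line, for example:
#   python -m transformers.models.mra.convert_mra_pytorch_to_pytorch \
#       --pytorch_model_path /path/to/mra_checkpoint.bin \
#       --config_file /path/to/mra_config.json \
#       --pytorch_dump_path /path/to/output_dir
```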
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mra/configuration_mra.py
src/transformers/models/mra/configuration_mra.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MRA model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class MraConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`MraModel`]. It is used to instantiate an MRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mra [uw-madison/mra-base-512-4](https://huggingface.co/uw-madison/mra-base-512-4) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the Mra model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MraModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 1): The vocabulary size of the `token_type_ids` passed when calling [`MraModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. block_per_row (`int`, *optional*, defaults to 4): Used to set the budget for the high resolution scale. approx_mode (`str`, *optional*, defaults to `"full"`): Controls whether both low and high resolution approximations are used. Set to `"full"` for both low and high resolution and `"sparse"` for only low resolution. 
initial_prior_first_n_blocks (`int`, *optional*, defaults to 0): The initial number of blocks for which high resolution is used. initial_prior_diagonal_n_blocks (`int`, *optional*, defaults to 0): The number of diagonal blocks for which high resolution is used. Example: ```python >>> from transformers import MraConfig, MraModel >>> # Initializing a Mra uw-madison/mra-base-512-4 style configuration >>> configuration = MraConfig() >>> # Initializing a model (with random weights) from the uw-madison/mra-base-512-4 style configuration >>> model = MraModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mra" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.block_per_row = block_per_row self.approx_mode = approx_mode self.initial_prior_first_n_blocks = initial_prior_first_n_blocks self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks __all__ = ["MraConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
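Since the configuration above exposes the MRA-specific knobs (`approx_mode`, `block_per_row`, and the two `initial_prior_*` counts), a short hedged sketch of setting them explicitly and round-tripping the config may help; the values and the save directory are illustrative, not recommended settings:

```python
from transformers import MraConfig, MraForMaskedLM

# "sparse" keeps only the low-resolution approximation; "full" (the default)
# uses both scales. block_per_row sets the high-resolution budget.
config = MraConfig(
    approx_mode="sparse",
    block_per_row=2,
    initial_prior_first_n_blocks=1,
    initial_prior_diagonal_n_blocks=1,
)
model = MraForMaskedLM(config)  # randomly initialized weights

# Configurations round-trip like any PreTrainedConfig.
config.save_pretrained("./mra-sparse-config")  # hypothetical path
reloaded = MraConfig.from_pretrained("./mra-sparse-config")
assert reloaded.approx_mode == "sparse" and reloaded.block_per_row == 2
```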
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitdet/configuration_vitdet.py
src/transformers/models/vitdet/configuration_vitdet.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VitDet model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class VitDetConfig(BackboneConfigMixin, PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`VitDetModel`]. It is used to instantiate an VitDet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VitDet [google/vitdet-base-patch16-224](https://huggingface.co/google/vitdet-base-patch16-224) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. mlp_ratio (`int`, *optional*, defaults to 4): Ratio of mlp hidden dim to embedding dim. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. pretrain_image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image during pretraining. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. drop_path_rate (`float`, *optional*, defaults to 0.0): Stochastic depth rate. window_block_indices (`list[int]`, *optional*, defaults to `[]`): List of indices of blocks that should have window attention instead of regular global self-attention. residual_block_indices (`list[int]`, *optional*, defaults to `[]`): List of indices of blocks that should have an extra residual block after the MLP. 
use_absolute_position_embeddings (`bool`, *optional*, defaults to `True`): Whether to add absolute position embeddings to the patch embeddings. use_relative_position_embeddings (`bool`, *optional*, defaults to `False`): Whether to add relative position embeddings to the attention maps. window_size (`int`, *optional*, defaults to 0): The size of the attention window. out_features (`list[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import VitDetConfig, VitDetModel >>> # Initializing a VitDet configuration >>> configuration = VitDetConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = VitDetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vitdet" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4, hidden_act="gelu", dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, image_size=224, pretrain_image_size=224, patch_size=16, num_channels=3, qkv_bias=True, drop_path_rate=0.0, window_block_indices=[], residual_block_indices=[], use_absolute_position_embeddings=True, use_relative_position_embeddings=False, window_size=0, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.hidden_act = hidden_act self.dropout_prob = dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.pretrain_image_size = pretrain_image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.drop_path_rate = drop_path_rate self.window_block_indices = window_block_indices self.residual_block_indices = residual_block_indices self.use_absolute_position_embeddings = use_absolute_position_embeddings self.use_relative_position_embeddings = use_relative_position_embeddings self.window_size = window_size self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) __all__ = ["VitDetConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
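Because `VitDetConfig` doubles as a backbone configuration (via `BackboneConfigMixin`), a brief sketch of a windowed-attention backbone setup may be useful; the index lists and `out_features` choice below are illustrative values, not a published ViTDet recipe:

```python
from transformers import VitDetConfig, VitDetBackbone

config = VitDetConfig(
    image_size=224,
    patch_size=16,
    window_size=14,
    # Blocks listed here use window attention; the remaining blocks
    # (2, 5, 8, 11 with the default 12 layers) keep global attention.
    window_block_indices=[0, 1, 3, 4, 6, 7, 9, 10],
    residual_block_indices=[],
    # Stage names are "stem", "stage1", ..., "stage12" for 12 hidden layers.
    out_features=["stage12"],
)
backbone = VitDetBackbone(config)
print(backbone.out_features)  # ['stage12']
```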
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitdet/modeling_vitdet.py
src/transformers/models/vitdet/modeling_vitdet.py
# coding=utf-8 # Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ViTDet backbone.""" import collections.abc import math from typing import Optional, Union import torch from torch import nn from ... import initialization as init from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ...utils.backbone_utils import BackboneMixin from .configuration_vitdet import VitDetConfig logger = logging.get_logger(__name__) class VitDetEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.pretrain_image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches if config.use_absolute_position_embeddings: # Initialize absolute positional embedding with pretrain image size. num_positions = num_patches + 1 self.position_embeddings = nn.Parameter(torch.zeros(1, num_positions, config.hidden_size)) else: self.position_embeddings = None self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def get_absolute_positions(self, abs_pos_embeddings, has_cls_token, height, width): """ Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token dimension for the original embeddings. Args: abs_pos_embeddings (`torch.Tensor`): Absolute positional embeddings with (1, num_position, num_channels). has_cls_token (`bool`): If true, has 1 embedding in abs_pos_embeddings for cls token. height (`int`): Height of input image tokens. width (`int`): Width of input image tokens. Returns: Absolute positional embeddings after processing with shape (1, height, width, num_channels) """ if has_cls_token: abs_pos_embeddings = abs_pos_embeddings[:, 1:] num_position = abs_pos_embeddings.shape[1] size = int(math.sqrt(num_position)) # This is a constant and can be recorded as such in the ONNX export. if size * size != num_position: raise ValueError("Absolute position embeddings must be a square number.") if torch.jit.is_tracing() or (size != height or size != width): # nn.functional.interpolate is a noop in case size == height and size == width - we need to always capture this path with jit.trace. 
new_abs_pos_embeddings = nn.functional.interpolate( abs_pos_embeddings.reshape(1, size, size, -1).permute(0, 3, 1, 2), size=(height, width), mode="bicubic", align_corners=False, ) return new_abs_pos_embeddings.permute(0, 2, 3, 1) else: return abs_pos_embeddings.reshape(1, height, width, -1) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." f" Expected {self.num_channels} but got {num_channels}." ) embeddings = self.projection(pixel_values) if self.position_embeddings is not None: # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) embeddings = embeddings.permute(0, 2, 3, 1) # add position embeddings embeddings = embeddings + self.get_absolute_positions( self.position_embeddings, True, embeddings.shape[1], embeddings.shape[2] ) # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width) embeddings = embeddings.permute(0, 3, 1, 2) return embeddings @torch.jit.script_if_tracing # nn.functional.interpolate's `size` needs to be dynamic. def get_rel_pos(q_size, k_size, rel_pos): """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (`int`): Size of query q. k_size (`int`): Size of key k. rel_pos (`torch.Tensor`): Relative position embeddings (num_embeddings, num_channels). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel position embeddings. rel_pos_resized = nn.functional.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def add_decomposed_relative_positions(attn, queries, rel_pos_h, rel_pos_w, q_size, k_size): """ Calculate decomposed Relative Positional Embeddings as introduced in [MViT2](https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py). Args: attn (`torch.Tensor`): Attention map. queries (`torch.Tensor`): Query q in the attention layer with shape (batch_size, queries_height * queries_width, num_channels). rel_pos_h (`torch.Tensor`): Relative position embeddings (Lh, num_channels) for height axis. rel_pos_w (`torch.Tensor`): Relative position embeddings (Lw, num_channels) for width axis. q_size (`tuple[int]`): Spatial sequence size of query q with (queries_height, queries_width). k_size (`tuple[int]`): Spatial sequence size of key k with (keys_height, keys_width). Returns: attn (Tensor): attention map with added relative positional embeddings. 
""" queries_height, queries_width = q_size keys_height, keys_width = k_size relative_height = get_rel_pos(queries_height, keys_height, rel_pos_h) relative_width = get_rel_pos(queries_width, keys_width, rel_pos_w) batch_size, _, dim = queries.shape r_q = queries.reshape(batch_size, queries_height, queries_width, dim) relative_height = torch.einsum("bhwc,hkc->bhwk", r_q, relative_height) relative_weight = torch.einsum("bhwc,wkc->bhwk", r_q, relative_width) attn = ( attn.view(batch_size, queries_height, queries_width, keys_height, keys_width) + relative_height[:, :, :, :, None] + relative_weight[:, :, :, None, :] ).view(batch_size, queries_height * queries_width, keys_height * keys_width) return attn class VitDetAttention(nn.Module): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config, input_size=None): """ Args: config (`VitDetConfig`): Model configuration. input_size (`tuple[int]`, *optional*): Input resolution, only required in case relative position embeddings are added. """ super().__init__() dim = config.hidden_size num_heads = config.num_attention_heads self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias) self.proj = nn.Linear(dim, dim) self.use_relative_position_embeddings = config.use_relative_position_embeddings if self.use_relative_position_embeddings: # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) def forward(self, hidden_state, output_attentions=False): batch_size, height, width, _ = hidden_state.shape # qkv with shape (3, batch_size, num_heads, height * width, num_channels) qkv = self.qkv(hidden_state).reshape(batch_size, height * width, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # queries, keys and values have shape (batch_size * num_heads, height * width, num_channels) queries, keys, values = qkv.reshape(3, batch_size * self.num_heads, height * width, -1).unbind(0) attention_scores = (queries * self.scale) @ keys.transpose(-2, -1) if self.use_relative_position_embeddings: attention_scores = add_decomposed_relative_positions( attention_scores, queries, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) attention_probs = attention_scores.softmax(dim=-1) hidden_state = attention_probs @ values hidden_state = hidden_state.view(batch_size, self.num_heads, height, width, -1) hidden_state = hidden_state.permute(0, 2, 3, 1, 4) hidden_state = hidden_state.reshape(batch_size, height, width, -1) hidden_state = self.proj(hidden_state) if output_attentions: attention_probs = attention_probs.reshape( batch_size, self.num_heads, attention_probs.shape[-2], attention_probs.shape[-1] ) outputs = (hidden_state, attention_probs) else: outputs = (hidden_state,) return outputs # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath class VitDetDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class VitDetLayerNorm(nn.Module): """ A LayerNorm variant, popularized by Transformers, that performs point-wise mean and variance normalization over the channel dimension for inputs that have shape (batch_size, channels, height, width). https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 """ def __init__(self, normalized_shape, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.normalized_shape = (normalized_shape,) def forward(self, x): u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x class VitDetResBottleneckBlock(nn.Module): """ The standard bottleneck residual block without the last activation layer. It contains 3 conv layers with kernels 1x1, 3x3, 1x1. """ def __init__(self, config, in_channels, out_channels, bottleneck_channels): """ Args: config (`VitDetConfig`): Model configuration. in_channels (`int`): Number of input channels. out_channels (`int`): Number of output channels. bottleneck_channels (`int`): Number of output channels for the 3x3 "bottleneck" conv layers. """ super().__init__() self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, 1, bias=False) self.norm1 = VitDetLayerNorm(bottleneck_channels) self.act1 = ACT2FN[config.hidden_act] self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1, bias=False) self.norm2 = VitDetLayerNorm(bottleneck_channels) self.act2 = ACT2FN[config.hidden_act] self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, 1, bias=False) self.norm3 = VitDetLayerNorm(out_channels) def forward(self, x): out = x for layer in self.children(): out = layer(out) out = x + out return out class VitDetMlp(nn.Module): def __init__(self, config, in_features: int, hidden_features: int) -> None: super().__init__() self.fc1 = nn.Linear(in_features, hidden_features) self.act = ACT2FN[config.hidden_act] self.fc2 = nn.Linear(hidden_features, in_features) self.drop = nn.Dropout(config.dropout_prob) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x def window_partition(hidden_state, window_size): """ Partition into non-overlapping windows with padding if needed. Args: hidden_state (`torch.Tensor`): Input tokens with [batch_size, height, width, num_channels]. window_size (`int`): Window size. 
Returns: `tuple(torch.FloatTensor)` comprising various elements: - windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels]. - (padded_height, padded_width): padded height and width before partition """ batch_size, height, width, num_channels = hidden_state.shape pad_height = (window_size - height % window_size) % window_size pad_width = (window_size - width % window_size) % window_size # Noop in case pad_width == 0 and pad_height == 0. hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height)) padded_height, padded_width = height + pad_height, width + pad_width hidden_state = hidden_state.view( batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels ) windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows, (padded_height, padded_width) def window_unpartition(windows, window_size, pad_height_width, height_width): """ Window unpartition into original sequences and removing padding. Args: windows (`torch.Tensor`): Input tokens with [batch_size * num_windows, window_size, window_size, num_channels]. window_size (`int`): Window size. pad_height_width (`tuple[int]`): Padded height and width (padded_height, padded_width). height_width (`tuple[int]`): Original height and width before padding. Returns: hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels]. """ padded_height, padded_width = pad_height_width height, width = height_width batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size) hidden_state = windows.view( batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1 ) hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous() hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1) # We always have height <= padded_height and width <= padded_width hidden_state = hidden_state[:, :height, :width, :].contiguous() return hidden_state class VitDetLayer(GradientCheckpointingLayer): """This corresponds to the Block class in the original implementation.""" def __init__( self, config: VitDetConfig, drop_path_rate: float = 0, window_size: int = 0, use_residual_block: bool = False ) -> None: super().__init__() dim = config.hidden_size image_size = config.image_size image_size = image_size if isinstance(image_size, (list, tuple)) else (image_size, image_size) patch_size = config.patch_size patch_size = patch_size if isinstance(patch_size, (list, tuple)) else (patch_size, patch_size) input_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = VitDetAttention( config, input_size=input_size if window_size == 0 else (window_size, window_size) ) self.drop_path = VitDetDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.mlp = VitDetMlp(config=config, in_features=dim, hidden_features=int(dim * config.mlp_ratio)) self.window_size = window_size self.use_residual_block = use_residual_block if self.use_residual_block: # Use a residual block with bottleneck channel as dim // 2 self.residual = VitDetResBottleneckBlock( config=config, in_channels=dim, out_channels=dim, bottleneck_channels=dim // 2, ) def forward( self, hidden_states: torch.Tensor, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, 
torch.Tensor], tuple[torch.Tensor]]: hidden_states = hidden_states.permute(0, 2, 3, 1) shortcut = hidden_states hidden_states = self.norm1(hidden_states) # Window partition if self.window_size > 0: height, width = hidden_states.shape[1], hidden_states.shape[2] hidden_states, pad_height_width = window_partition(hidden_states, self.window_size) self_attention_outputs = self.attention( hidden_states, output_attentions=output_attentions, ) hidden_states = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # Reverse window partition if self.window_size > 0: hidden_states = window_unpartition(hidden_states, self.window_size, pad_height_width, (height, width)) # first residual connection hidden_states = shortcut + self.drop_path(hidden_states) hidden_states = hidden_states + self.drop_path(self.mlp(self.norm2(hidden_states))) hidden_states = hidden_states.permute(0, 3, 1, 2) if self.use_residual_block: hidden_states = self.residual(hidden_states) outputs = (hidden_states,) + outputs return outputs class VitDetEncoder(nn.Module): def __init__(self, config: VitDetConfig) -> None: super().__init__() self.config = config depth = config.num_hidden_layers # stochastic depth decay rule drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, depth, device="cpu")] layers = [] for i in range(depth): layers.append( VitDetLayer( config, drop_path_rate=drop_path_rate[i], window_size=config.window_size if i in config.window_block_indices else 0, use_residual_block=i in config.residual_block_indices, ) ) self.layer = nn.ModuleList(layers) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class VitDetPreTrainedModel(PreTrainedModel): config: VitDetConfig base_model_prefix = "vitdet" main_input_name = "pixel_values" input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = [] @torch.no_grad() def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, nn.LayerNorm): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, VitDetEmbeddings): init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range) elif isinstance(module, VitDetAttention) and self.config.use_relative_position_embeddings: init.trunc_normal_(module.rel_pos_h, mean=0.0, std=self.config.initializer_range) 
init.trunc_normal_(module.rel_pos_w, mean=0.0, std=self.config.initializer_range) elif isinstance(module, VitDetResBottleneckBlock): for layer in [module.conv1, module.conv2, module.conv3]: init.kaiming_normal_(layer.weight, mode="fan_out", nonlinearity="relu") if layer.bias is not None: init.constant_(layer.bias, 0) for layer in [module.norm1, module.norm2]: init.ones_(layer.weight) init.zeros_(layer.bias) # zero init last norm layer. init.zeros_(module.norm3.weight) init.zeros_(module.norm3.bias) @auto_docstring class VitDetModel(VitDetPreTrainedModel): def __init__(self, config: VitDetConfig): super().__init__(config) self.config = config self.embeddings = VitDetEmbeddings(config) self.encoder = VitDetEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> VitDetEmbeddings: return self.embeddings.projection @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, BaseModelOutput]: r""" Examples: ```python >>> from transformers import VitDetConfig, VitDetModel >>> import torch >>> config = VitDetConfig() >>> model = VitDetModel(config) >>> pixel_values = torch.randn(1, 3, 224, 224) >>> with torch.no_grad(): ... outputs = model(pixel_values) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 768, 14, 14] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" ViTDet backbone, to be used with frameworks like Mask R-CNN. """ ) class VitDetBackbone(VitDetPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.embeddings = VitDetEmbeddings(config) self.encoder = VitDetEncoder(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] # initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> VitDetEmbeddings: return self.embeddings.projection @auto_docstring def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> BackboneOutput: r""" Examples: ```python >>> from transformers import VitDetConfig, VitDetBackbone >>> import torch >>> config = VitDetConfig() >>> model = VitDetBackbone(config) >>> pixel_values = torch.randn(1, 3, 224, 224) >>> with torch.no_grad(): ... 
outputs = model(pixel_values) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 768, 14, 14] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output = self.embeddings(pixel_values) outputs = self.encoder( embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: feature_maps += (hidden_state,) if not return_dict: if output_hidden_states: output = (feature_maps,) + outputs[1:] else: output = (feature_maps,) + outputs[2:] return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) __all__ = ["VitDetModel", "VitDetPreTrainedModel", "VitDetBackbone"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
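The windowed attention path in the modeling file above hinges on `window_partition` / `window_unpartition`, which pad the feature map to a multiple of the window size and later strip the padding again. A small self-contained check of that round trip, with an arbitrary 15x15 feature map and window size 14 chosen purely for illustration:

```python
import torch

from transformers.models.vitdet.modeling_vitdet import window_partition, window_unpartition

hidden_state = torch.randn(2, 15, 15, 768)  # (batch, height, width, channels)

# 15 is padded up to 28 (two 14x14 windows per spatial dim), giving
# 2 batches * 2 * 2 = 8 windows.
windows, pad_hw = window_partition(hidden_state, window_size=14)
print(windows.shape, pad_hw)  # torch.Size([8, 14, 14, 768]) (28, 28)

# Undoing the partition removes the padding and restores the exact input.
restored = window_unpartition(windows, 14, pad_hw, (15, 15))
assert torch.equal(restored, hidden_state)
```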
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitdet/__init__.py
src/transformers/models/vitdet/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_vitdet import * from .modeling_vitdet import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
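The `__init__.py` above wires the subpackage through `_LazyModule`, so importing `transformers.models.vitdet` stays cheap and the modeling code is only executed on first attribute access. A quick illustration of that behaviour; the `sys.modules` checks reflect the expected lazy loading in a fresh interpreter, not a guaranteed invariant if other code has already touched the module:

```python
import sys

import transformers.models.vitdet as vitdet

# The heavy modeling module is not loaded just by importing the subpackage.
print("transformers.models.vitdet.modeling_vitdet" in sys.modules)  # expected: False

_ = vitdet.VitDetModel  # attribute access resolves through _LazyModule
print("transformers.models.vitdet.modeling_vitdet" in sys.modules)  # expected: True
```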
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/shieldgemma2/convert_shieldgemma2_weights_orbax_to_hf.py
src/transformers/models/shieldgemma2/convert_shieldgemma2_weights_orbax_to_hf.py
r"""Utility to convert Gemma models from Orbax to HF Transformers checkpoint. python -m transformers.models.shieldgemma2.convert_shieldgemma2_weights_orbax_to_hf \ --tokenizer_path="$HOME/gemma3/tokenizer/gemma3_cleaned_262144_v2.spiece.model" \ --checkpoint_path_gemma="$HOME/gemma3/gemma3_4b_pt_orbax/" \ --checkpoint_path_shieldgemma="$HOME/shieldgemma2/shieldgemma-2_4b_orbax/" \ --output_path="$HOME/shieldgemma2/shieldgemma2_4b_pt_safetensors/" \ --precision='bfloat16' """ import dataclasses from collections.abc import Iterator, Mapping, Sequence from typing import Any import accelerate import numpy as np import torch import tree from absl import app, flags, logging from orbax import checkpoint as obc from ...image_utils import PILImageResampling from ..gemma import GemmaTokenizerFast from ..gemma3 import Gemma3ImageProcessor, Gemma3TextConfig from ..siglip import SiglipVisionConfig from . import ShieldGemma2Config, ShieldGemma2ForImageClassification, ShieldGemma2Processor # ==== Internal Constants and Classes ==== _CHAT_TEMPLATE = """{{ bos_token }} {%- for message in messages -%} {%- if message['content'] is iterable -%} {%- for item in message['content'] -%} {{ '<start_of_turn>user\n' }} {%- if item['type'] == 'image' -%} {{ '<start_of_image>' }} {%- elif item['type'] == 'text' -%} {{ 'You are a policy expert trying to help determine whether a generated or real image is in' }} {{ ' violation of the safety policy below. Your decision must very clearly follow the specific'}} {{ ' language used in the safety policy and should not include any interpretation outside of the' }} {{ ' safety policy.\n\n' }} {{ '<BEGIN_SAFETY_POLICY>\n' }} {{ item['text'] + '\n' }} {{ '</END_SAFETY_POLICY>\n\n' }} {{ 'Does the image violate the above policy? Your Answer must start with \\'Yes\\' or \\'No\\'.' }} {{ '<end_of_turn>\n' }} {%- endif -%} {%- endfor -%} {{'<start_of_turn>model\n'}} {%- else -%} {{ raise_exception("Conversation messages must contain iterable content containing images and policy definitions in text.") }} {%- endif -%} {%- endfor -%} """ _DTYPES = { "float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16, } _SHIELDGEMMA2_POLICIES: Mapping[str, str] = { "dangerous": ( "No Dangerous Content: The image shall not contain content that" " facilitates or encourages activities that could cause real-world harm" " (e.g., building firearms and explosive devices, promotion of" " terrorism, instructions for suicide)." ), "sexual": ( "No Sexually Explicit content: The image shall not contain content that" " depicts explicit or graphic sexual acts (e.g., pornography, erotic" " nudity, depictions of rape or sexual assault)." ), "violence": ( "No Violence/Gore content: The image shall not contain content that" " depicts shocking, sensational, or gratuitous violence (e.g.," " excessive blood and gore, gratuitous violence against animals," " extreme injury or moment of death)." 
), } _SIGLIP_BASE = "SigLiPFromPatches_0/siglip_encoder" _SIGLIP_EMBEDDING = "SigLiPFromPatches_0/siglip_encoder/embedding" _SIGLIP_TRANSFORMER_ENCODER_BLOCK = "SigLiPFromPatches_0/siglip_encoder/Transformer/encoderblock_" _SIGLIP_TRANSFORMER_ENCODER_BLOCK_LEN = len(_SIGLIP_TRANSFORMER_ENCODER_BLOCK) _SIGLIP_TRANSFORMER_ENCODER_NORM = "SigLiPFromPatches_0/siglip_encoder/Transformer/encoder_norm" _TRANSFORMER_DECODER_BLOCK = "transformer/layer_" _TRANSFORMER_DECODER_BLOCK_LEN = len(_TRANSFORMER_DECODER_BLOCK) _TRANSFORMER_EMBEDDER = "transformer/embedder" _TRANSFORMER_FINAL_NORM = "transformer/final_norm" _TRANSFORMER_POST_TRAINING_PREFIX = "rlx_networks/policy_network/" _TRANSFORMER_POST_TRAINING_PREFIX_LEN = len(_TRANSFORMER_POST_TRAINING_PREFIX) # ==== Flags ==== _GEMMA_CHECKPOINT_PATH = flags.DEFINE_string( name="checkpoint_path_gemma", default=None, help="Path to the Orbax checkpoint containing the vision weights.", required=True, ) _SHIELDGEMMA_CHECKPOINT_PATH = flags.DEFINE_string( name="checkpoint_path_shieldgemma", default=None, help="Path to the Orbax checkpoint containing the language model weights.", required=True, ) OUTPUT_PATH = flags.DEFINE_string( name="output_path", default=None, help="Path to store the HF checkpoint.", required=True, ) PRECISION = flags.DEFINE_enum( name="precision", default=None, help="The floating point precision (aka dtype) of the model.", enum_values=set(_DTYPES.keys()), required=True, ) TOKENIZER_PATH = flags.DEFINE_string( name="tokenizer_path", default=None, help="Path to the SentencePiece model file.", required=True, ) def convert_siglip_weight( config: SiglipVisionConfig, paths: Sequence[str], weights: np.ndarray, ) -> tuple[str, np.ndarray]: path, prop = paths normalized_path: str = "" updated_weights: np.ndarray = None if path == _SIGLIP_BASE: normalized_path = "vision_tower.vision_model.embeddings.position_embedding.weight" updated_weights = weights.reshape(-1, config.hidden_size) elif path == _SIGLIP_EMBEDDING: if prop == "kernel": normalized_path = "vision_tower.vision_model.embeddings.patch_embedding.weight" updated_weights = weights.transpose(3, 2, 0, 1) elif prop == "bias": normalized_path = "vision_tower.vision_model.embeddings.patch_embedding.bias" updated_weights = weights else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. Should be `bias` or `kernel`.") elif path.startswith(_SIGLIP_TRANSFORMER_ENCODER_BLOCK): encoder_block_path = path[_SIGLIP_TRANSFORMER_ENCODER_BLOCK_LEN:] next_path_separator_idx = encoder_block_path.find("/") layer_idx = encoder_block_path[:next_path_separator_idx] encoder_block_path = encoder_block_path[next_path_separator_idx:] normalized_path = f"vision_tower.vision_model.encoder.layers.{layer_idx}" if encoder_block_path.startswith("/LayerNorm"): normalized_path += ".layer_norm1" if path.endswith("_0") else ".layer_norm2" if prop == "scale": normalized_path += ".weight" updated_weights = weights.transpose() elif prop == "bias": normalized_path += ".bias" updated_weights = weights else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. Should be `bias` or `scale`.") elif encoder_block_path.startswith("/MlpBlock_0"): normalized_path += ".mlp.fc1" if "/Dense_0" in encoder_block_path else ".mlp.fc2" if prop == "kernel": normalized_path += ".weight" updated_weights = weights.transpose() elif prop == "bias": normalized_path += ".bias" updated_weights = weights else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. 
Should be `bias` or `kernel`.") elif encoder_block_path.startswith("/MultiHeadDotProductAttention_0"): if encoder_block_path.endswith("/key"): normalized_path += ".self_attn.k_proj" elif encoder_block_path.endswith("/out"): normalized_path += ".self_attn.out_proj" elif encoder_block_path.endswith("/query"): normalized_path += ".self_attn.q_proj" elif encoder_block_path.endswith("/value"): normalized_path += ".self_attn.v_proj" else: raise ValueError(f"Unexpected path `{path}` in SigLIP Transformer MultiHeadDotProductAttention_0.") if prop == "bias": normalized_path += ".bias" updated_weights = weights.reshape(-1, config.hidden_size).reshape(-1) elif prop == "kernel": normalized_path += ".weight" updated_weights = weights.reshape(-1, config.hidden_size).transpose() else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. Should be `bias` or `kernel`.") else: raise ValueError(f"Unexpected path `{path}` in SigLIP Transformer Encoder Block.") elif path == _SIGLIP_TRANSFORMER_ENCODER_NORM: if prop == "scale": normalized_path = "vision_tower.vision_model.post_layernorm.weight" updated_weights = weights.transpose() elif prop == "bias": normalized_path = "vision_tower.vision_model.post_layernorm.bias" updated_weights = weights else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. Should be `bias` or `scale`.") else: raise ValueError(f"Unexpected path `{path}`.") if "vision" in normalized_path: print(normalized_path) return normalized_path, updated_weights def convert_transformer_weights( config: Gemma3TextConfig, paths: Sequence[str], weights: np.ndarray, ) -> Iterator[tuple[str, np.ndarray]]: path, prop = paths if path.startswith(_TRANSFORMER_POST_TRAINING_PREFIX): path = path[_TRANSFORMER_POST_TRAINING_PREFIX_LEN:] converted_paths: list[str] = [] converted_weights: list[Any] = [] attn_head_dim = config.num_attention_heads * config.head_dim kv_head_dim = config.num_key_value_heads * config.head_dim if path == _TRANSFORMER_EMBEDDER: if prop == "input_embedding": # Tied to language_model.lm_head.weight, assigned at the end. 
converted_paths = ["language_model.model.embed_tokens.weight"] # Gemma3 model doesn't have image soft token in input and output embeddings, resize to avoid bugs we had with Mllama pre_expansion_embeddings = weights mu = np.mean(pre_expansion_embeddings, axis=0) sigma = np.cov(pre_expansion_embeddings, rowvar=False, bias=True) new_embeddings = np.random.multivariate_normal(mu, 1e-5 * sigma, size=64) weights = np.vstack([pre_expansion_embeddings, new_embeddings]) converted_weights = [weights] else: raise ValueError(f"Unexpected member, {prop}, in Embedder.") elif path.startswith(f"{_TRANSFORMER_EMBEDDER}/mm"): if path.endswith("/mm_input_projection"): converted_paths = ["multi_modal_projector.mm_input_projection_weight"] converted_weights = [weights] elif path.endswith("/mm_soft_embedding_norm"): converted_paths = ["multi_modal_projector.mm_soft_emb_norm.weight"] converted_weights = [weights] else: raise ValueError(f"Unexpected subpath, `{path}`, in Embedder.") elif path == _TRANSFORMER_FINAL_NORM: converted_paths = ["language_model.model.norm.weight"] converted_weights = [weights] elif path.startswith(_TRANSFORMER_DECODER_BLOCK): decoder_block_path = path[_TRANSFORMER_DECODER_BLOCK_LEN:] next_path_separator_idx = decoder_block_path.find("/") layer_idx = decoder_block_path[:next_path_separator_idx] decoder_block_path = decoder_block_path[next_path_separator_idx:] base_path = f"language_model.model.layers.{layer_idx}" if path.endswith("attn/attn_vec_einsum"): converted_paths = [f"{base_path}.self_attn.o_proj.weight"] converted_weights = [weights.transpose(2, 0, 1).reshape(config.hidden_size, attn_head_dim)] elif path.endswith("attn/_key_norm"): converted_paths = [f"{base_path}.self_attn.k_norm.weight"] converted_weights = [weights] elif path.endswith("attn/kv_einsum"): converted_paths = [ f"{base_path}.self_attn.k_proj.weight", f"{base_path}.self_attn.v_proj.weight", ] k_proj_weights, v_proj_weights = weights converted_weights = [ k_proj_weights.transpose(0, 2, 1).reshape(kv_head_dim, config.hidden_size), v_proj_weights.transpose(0, 2, 1).reshape(kv_head_dim, config.hidden_size), ] elif path.endswith("attn/q_einsum"): converted_paths = [f"{base_path}.self_attn.q_proj.weight"] converted_weights = [weights.transpose(0, 2, 1).reshape(attn_head_dim, config.hidden_size)] elif path.endswith("attn/_query_norm"): converted_paths = [f"{base_path}.self_attn.q_norm.weight"] converted_weights = [weights] elif path.endswith("mlp/gating_einsum"): converted_paths = [ f"{base_path}.mlp.gate_proj.weight", f"{base_path}.mlp.up_proj.weight", ] gate_proj_weight, up_proj_weight = weights converted_weights = [gate_proj_weight, up_proj_weight] elif path.endswith("mlp/linear"): converted_paths = [f"{base_path}.mlp.down_proj.weight"] converted_weights = [weights.transpose()] elif path.endswith("post_attention_norm"): converted_paths = [f"{base_path}.post_attention_layernorm.weight"] converted_weights = [weights] elif path.endswith("post_ffw_norm"): converted_paths = [f"{base_path}.post_feedforward_layernorm.weight"] converted_weights = [weights] elif path.endswith("pre_attention_norm"): converted_paths = [f"{base_path}.input_layernorm.weight"] converted_weights = [weights] elif path.endswith("pre_ffw_norm"): converted_paths = [f"{base_path}.pre_feedforward_layernorm.weight"] converted_weights = [weights] else: raise ValueError(f"Unexpected path `{path}` in Decoder Block.") else: raise ValueError(f"Unexpected path `{path}`.") if (cpl := len(converted_paths)) != (cwl := len(converted_weights)): raise ValueError( "The 
`converted_paths` and `converted_weights` should be the same " f"length. Got {cpl} and {cwl}, respectively, for {path}." ) return zip(converted_paths, converted_weights) def transpose_reshape(x: torch.Tensor) -> torch.Tensor: x = x.transpose(1, 2) return x.reshape(x.shape[0] * x.shape[1], x.shape[2]).contiguous() @dataclasses.dataclass(frozen=True) class ConversionResult: state_tree: dict[str, torch.Tensor] config: ShieldGemma2Config def convert( shieldgemma_checkpoint_path: str, gemma_checkpoint_path: str, config: ShieldGemma2Config, target_dtype: torch.dtype, ) -> ConversionResult: """Loads Orbax checkpoint from `input_path` and converts it to HF tree.""" checkpointer = obc.PyTreeCheckpointer() sg2_ckpt = checkpointer.restore(shieldgemma_checkpoint_path) g3_ckpt = checkpointer.restore(gemma_checkpoint_path) hf_tree: dict[str, torch.Tensor] = {} def update_tree(path: str, weights: np.ndarray) -> None: torch_tensor = torch.from_numpy(weights.astype("float32")).type(target_dtype) logging.info( "%s converted shape=%s with dtype=%s", path, weights.shape, torch_tensor.dtype, ) hf_tree[f"model.{path}"] = torch_tensor for paths, value in tree.flatten_with_path(g3_ckpt): if paths[0].startswith("SigLiPFromPatches_"): path, weights = convert_siglip_weight(config=config.vision_config, paths=paths, weights=value) update_tree(path, weights) for paths, value in tree.flatten_with_path(sg2_ckpt): for path, weights in convert_transformer_weights(config=config.text_config, paths=paths, weights=value): update_tree(path, weights) hf_tree["model.language_model.lm_head.weight"] = hf_tree["model.language_model.model.embed_tokens.weight"] return ConversionResult(state_tree=hf_tree, config=config) def main(*args): del args dtype = getattr(torch, PRECISION.value) output_path = OUTPUT_PATH.value tokenizer = GemmaTokenizerFast( TOKENIZER_PATH.value, extra_special_tokens={ "image_token": "<image_soft_token>", # Should be ID=262_144 "boi_token": "<start_of_image>", # Should be ID=255_999 "eoi_token": "<end_of_image>", # Should be ID=256_000 }, ) yes_token_index, no_token_index = torch.tensor(tokenizer(["Yes", "No"])["input_ids"])[:, 1].numpy() config = ShieldGemma2Config( yes_token_index=int(yes_token_index), no_token_index=int(no_token_index), text_config=Gemma3TextConfig( vocab_size=262_208, hidden_size=2560, intermediate_size=2560 * 8 // 2, num_attention_heads=8, head_dim=256, num_hidden_layers=34, num_key_value_heads=4, sliding_window=1024, rope_parameters={"rope_type": "linear", "factor": 8.0}, # used for global RoPE only rope_theta=1_000_000, rope_local_base_freq=10_000, attn_logit_softcapping=None, query_pre_attn_scalar=256, max_position_embeddings=8192, ), vision_config={ "hidden_size": 1152, "intermediate_size": 4304, "num_hidden_layers": 27, "num_attention_heads": 16, "num_channels": 3, "image_size": 896, "patch_size": 14, "hidden_act": "gelu_pytorch_tanh", "layer_norm_eps": 1e-6, "attention_dropout": 0.0, "vision_use_head": False, }, ) config.save_pretrained(output_path) image_processor = Gemma3ImageProcessor( image_seq_length=256, image_mean=(0.5,) * 3, image_std=(0.5,) * 3, size={"height": 896, "width": 896}, resample=PILImageResampling.BILINEAR, ) processor = ShieldGemma2Processor( image_processor=image_processor, tokenizer=tokenizer, policy_definitions=_SHIELDGEMMA2_POLICIES, ) tokenizer.chat_template = _CHAT_TEMPLATE processor.chat_template = _CHAT_TEMPLATE processor.save_pretrained(output_path) logging.info("Saved Shieldgemma2Processor to %s", output_path) del processor del tokenizer 
logging.info("Converting Shieldgemma2 @ %s", dtype) result = convert(_SHIELDGEMMA_CHECKPOINT_PATH.value, _GEMMA_CHECKPOINT_PATH.value, config, dtype) logging.info("Converted Shieldgemma2 state tree from Orbax to Hugging Face.") with accelerate.init_empty_weights(): model = ShieldGemma2ForImageClassification(config=config) model.load_state_dict(result.state_tree, assign=True, strict=True) model.config.dtype = dtype logging.info("Loaded Shieldgemma2 in Hugging Face Transformers.") model.save_pretrained(output_path) logging.info("Saved Shieldgemma2 to SafeTensors in %s", output_path) del model del result if __name__ == "__main__": app.run(main)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
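One detail of `main()` above worth calling out is how the classifier's `yes_token_index` / `no_token_index` are derived: the words "Yes" and "No" are tokenized and, for each, the token following BOS is taken. The sketch below re-derives them the same way; the tokenizer path is a hypothetical placeholder for the `--tokenizer_path` flag:

```python
import torch

from transformers import GemmaTokenizerFast

# Mirrors the tokenizer construction in main(); the path is a placeholder.
tokenizer = GemmaTokenizerFast(
    "/path/to/gemma3_tokenizer.spiece.model",
    extra_special_tokens={
        "image_token": "<image_soft_token>",
        "boi_token": "<start_of_image>",
        "eoi_token": "<end_of_image>",
    },
)

encoded = torch.tensor(tokenizer(["Yes", "No"])["input_ids"])
# Column 0 is the BOS token; column 1 holds the "Yes"/"No" token itself.
yes_token_index, no_token_index = encoded[:, 1].tolist()
print(yes_token_index, no_token_index)
```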
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/shieldgemma2/processing_shieldgemma2.py
src/transformers/models/shieldgemma2/processing_shieldgemma2.py
# coding=utf-8 # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Mapping, Sequence from typing import Optional from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import Unpack from ...utils import logging from ..gemma3.processing_gemma3 import Gemma3Processor, Gemma3ProcessorKwargs logger = logging.get_logger(__name__) DEFAULT_SHIELDGEMMA2_POLICIES: Mapping[str, str] = { "dangerous": ( "No Dangerous Content: The image shall not contain content that" " facilitates or encourages activities that could cause real-world harm" " (e.g., building firearms and explosive devices, promotion of" " terrorism, instructions for suicide)." ), "sexual": ( "No Sexually Explicit content: The image shall not contain content that" " depicts explicit or graphic sexual acts (e.g., pornography, erotic" " nudity, depictions of rape or sexual assault)." ), "violence": ( "No Violence/Gore content: The image shall not contain content that" " depicts shocking, sensational, or gratuitous violence (e.g.," " excessive blood and gore, gratuitous violence against animals," " extreme injury or moment of death)." ), } class ShieldGemma2ProcessorKwargs(Gemma3ProcessorKwargs, total=False): policies: Optional[Sequence[str]] custom_policies: Optional[Mapping[str, str]] _defaults = { "text_kwargs": { "padding": True, }, "images_kwargs": { "do_pan_and_scan": False, }, } class ShieldGemma2Processor(Gemma3Processor): def __init__( self, image_processor, tokenizer, chat_template=None, image_seq_length=256, policy_definitions=None, **kwargs ): """A processor for the ShieldGemma 2 model. Args: image_processor: The image processor to use, typically a `Gemma3ImageProcessorFast` instance. tokenizer: The tokenizer to use, typically a `GemmaTokenizerFast` instance. chat_template: The chat template to use with this processor. Typically, this is unset as the processor configuration on Hugging Face Hub includes this value already. image_seq_length: The number of soft tokens per image. Typically, this is unset as the processor configuration on Hugging Face Hub includes this value already. policy_definitions: A mapping from policy name to its description in text used as the default policies to classify images against. The policy descriptions are included in the text of the prompts generated by this processor. Typically, this is unset as the processor configuration on Hugging Face Hub includes the base policies ShieldGemma was trained on. """ super().__init__(image_processor, tokenizer, chat_template, image_seq_length, **kwargs) if policy_definitions is None: self.policy_definitions = DEFAULT_SHIELDGEMMA2_POLICIES else: self.policy_definitions = policy_definitions def __call__( self, images: Optional[ImageInput] = None, text=None, **kwargs: Unpack[ShieldGemma2ProcessorKwargs], ) -> BatchFeature: """Generates a batch of inputs from the provided images. 
ShieldGemma was trained to classify image content for policy compliance using a specific prompt construction. This processor generates a batch of such prompts from the provided images by: 1. Creating a list of conversations, one for each `<image, policy>` pair; 2. Converting these conversations to text using `self.apply_chat_template()`; and 3. Encoding the conversations and images using the same techniques as `Gemma3Processor`. Args: images: A single image or a list of images to include in the batch. text: Not supported. videos: Not supported. audio: Not supported. kwargs: An optional dictionary of keyword arguments to configure the processor. Possible values include: * `custom_policies`: Additional policy definitions that augment the `self.policy_definitions` passed into the constructor. Note that `custom_policies` that share a key with `self.policy_definitions` will override the policy description * `policies`: (Optional) a list of keys in the joint `self.policy_definitions | custom_policies` dictionary of specific interest for the provided images. If empty or None, prompts will be generated for every key in the joint dictionary. Returns: A `BatchFeature` containing `input_ids`, `pixel_values`, etc. where each Tensor is of shape `(len(images) * len(policies), )`, and the order within the batch will be img1_policy1, ... img1_policyN, ... imgM_policyN. """ if not images: raise ValueError("ShieldGemma 2 needs images to classify") elif not isinstance(images, Sequence): images = [images] if not self.chat_template: raise ValueError("ShieldGemma 2 requires the use of a specific chat template") common_kwargs = kwargs.setdefault("common_kwargs", {}) if "return_tensors" in kwargs: common_kwargs["return_tensors"] = kwargs.pop("return_tensors") # Disable pan and scan images_kwargs = kwargs.setdefault("images_kwargs", {}) if images_kwargs.get("do_pan_and_scan") is True: logger.warning_once("ShieldGemma2 does not support pan and scan.") images_kwargs["do_pan_and_scan"] = False # Enable padding on the batch during tokenization text_kwargs = kwargs.setdefault("text_kwargs", {}) if "padding" not in text_kwargs: text_kwargs["padding"] = kwargs.pop("padding", True) text_kwargs["padding_side"] = kwargs.pop("padding_side", "left") policy_definitions: Mapping[str, str] = { **self.policy_definitions, **kwargs.get("custom_policies", {}), } if (policies := kwargs.get("policies")) is None: policies = list(policy_definitions.keys()) # TODO(ryanmullins): Support images from PIL or URLs. messages = [] expanded_images = [] for img in images: if not isinstance(img, list): img = [img] elif len(img) > 1: raise ValueError(f"SheildGemma can process at most one image per sample, but got {len(img)} images") for policy in policies: if img: messages.append( [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": policy_definitions[policy]}, ], } ] ) else: messages.append( [ { "role": "user", "content": [ {"type": "text", "text": policy_definitions[policy]}, ], } ] ) expanded_images.append(img) text = self.apply_chat_template(messages, tokenize=False) return super().__call__(images=expanded_images, text=text, **kwargs) __all__ = ["ShieldGemma2Processor"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
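A minimal usage sketch for the processor above. The checkpoint name `google/shieldgemma-2-4b-it` and the local image path are assumptions for illustration only; the `policies` keyword follows the `__call__` docstring above, which builds one prompt per `<image, policy>` pair.

```python
from PIL import Image
from transformers import AutoProcessor

# Assumed checkpoint name and image path, shown for illustration only.
processor = AutoProcessor.from_pretrained("google/shieldgemma-2-4b-it")
image = Image.open("example.jpg")

# One prompt is built per <image, policy> pair; 1 image x 2 policies -> batch of 2.
inputs = processor(
    images=[image],
    policies=["dangerous", "violence"],
    return_tensors="pt",
)
print(inputs["input_ids"].shape)  # (2, sequence_length)
```

Passing `custom_policies={"my_policy": "No ..."}` together with `policies=["my_policy"]` would classify against a user-defined policy instead of the built-in ones.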
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/shieldgemma2/modeling_shieldgemma2.py
src/transformers/models/shieldgemma2/modeling_shieldgemma2.py
# coding=utf-8 # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Union import torch from ...cache_utils import Cache from ...modeling_outputs import ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import ( auto_docstring, logging, ) from ..auto import AutoModelForImageTextToText from .configuration_shieldgemma2 import ShieldGemma2Config logger = logging.get_logger(__name__) @dataclass class ShieldGemma2ImageClassifierOutputWithNoAttention(ImageClassifierOutputWithNoAttention): """ShieldGemma2 classifies imags as violative or not relative to a specific policy Args: """ probabilities: Optional[torch.Tensor] = None @auto_docstring class ShieldGemma2ForImageClassification(PreTrainedModel): config: ShieldGemma2Config input_modalities = ("image", "text") _checkpoint_conversion_mapping = { "model.language_model.model": "model.model.language_model", "model.vision_tower": "model.model.vision_tower", "model.multi_modal_projector": "model.model.multi_modal_projector", "model.language_model.lm_head": "model.lm_head", } def __init__(self, config: ShieldGemma2Config): super().__init__(config=config) self.yes_token_index = getattr(config, "yes_token_index", 10_784) self.no_token_index = getattr(config, "no_token_index", 3771) self.model = AutoModelForImageTextToText.from_config(config=config) self.post_init() def get_input_embeddings(self): return self.model.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.model.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.model.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.model.language_model.set_output_embeddings(new_embeddings) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, token_type_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **lm_kwargs, ) -> ShieldGemma2ImageClassifierOutputWithNoAttention: r""" Returns: A `ShieldGemma2ImageClassifierOutputWithNoAttention` instance containing the logits and probabilities associated with the model predicting the `Yes` or `No` token as the response to that prompt, captured in the following properties. * `logits` (`torch.Tensor` of shape `(batch_size, 2)`): The first position along dim=1 is the logits for the `Yes` token and the second position along dim=1 is the logits for the `No` token. 
* `probabilities` (`torch.Tensor` of shape `(batch_size, 2)`): The first position along dim=1 is the probability of predicting the `Yes` token and the second position along dim=1 is the probability of predicting the `No` token. ShieldGemma prompts are constructed such that predicting the `Yes` token means the content *does violate* the policy as described. If you are only interested in the violative condition, use `violated = outputs.probabilities[:, 1]` to extract that slice from the output tensors. When used with the `ShieldGemma2Processor`, the `batch_size` will be equal to `len(images) * len(policies)`, and the order within the batch will be img1_policy1, ... img1_policyN, ... imgM_policyN. """ outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, token_type_ids=token_type_ids, cache_position=cache_position, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, logits_to_keep=logits_to_keep, **lm_kwargs, ) logits = outputs.logits selected_logits = logits[:, -1, [self.yes_token_index, self.no_token_index]] probabilities = torch.softmax(selected_logits, dim=-1) return ShieldGemma2ImageClassifierOutputWithNoAttention( logits=selected_logits, probabilities=probabilities, ) __all__ = [ "ShieldGemma2ForImageClassification", ]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
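A sketch of running the classifier end to end, under the same assumptions as the processor example above (checkpoint name and image are illustrative, not prescribed by this file). The index into `probabilities` follows the ordering in the `forward` pass above, where `selected_logits` stacks `[yes_token_index, no_token_index]`, so position 0 is the probability of the `Yes` (violative) answer.

```python
import torch
from PIL import Image
from transformers import AutoProcessor, ShieldGemma2ForImageClassification

model_id = "google/shieldgemma-2-4b-it"  # assumed checkpoint name
processor = AutoProcessor.from_pretrained(model_id)
model = ShieldGemma2ForImageClassification.from_pretrained(model_id).eval()

image = Image.open("example.jpg")
inputs = processor(images=[image], policies=["sexual"], return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# probabilities is ordered [Yes, No] per the forward pass above; "Yes" means the policy is violated.
yes_probability = outputs.probabilities[:, 0]
print(yes_probability)
```

With several images and policies, the batch order is img1_policy1, ..., img1_policyN, ..., imgM_policyN, as documented in the forward return section above.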
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/shieldgemma2/__init__.py
src/transformers/models/shieldgemma2/__init__.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_shieldgemma2 import * from .modeling_shieldgemma2 import * from .processing_shieldgemma2 import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
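The `__init__.py` above wires the package through `_LazyModule`, so the heavy configuration/modeling/processing modules are only imported on first attribute access. A small sketch of what that looks like from user code:

```python
# Resolving the name below triggers the lazy import of configuration_shieldgemma2;
# modeling and processing modules stay unloaded until they are actually needed.
from transformers.models.shieldgemma2 import ShieldGemma2Config

config = ShieldGemma2Config()  # default sub-configs: SigLIP vision + Gemma3 text
print(type(config).__name__)  # "ShieldGemma2Config"
```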
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/shieldgemma2/configuration_shieldgemma2.py
src/transformers/models/shieldgemma2/configuration_shieldgemma2.py
# coding=utf-8 # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PreTrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class ShieldGemma2Config(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`ShieldGemma2ForImageClassification`]. It is used to instantiate an ShieldGemma2ForImageClassification according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the shieldgemma-2-4b-it. e.g. [google/gemma-3-4b](https://huggingface.co/google/gemma-3-4b) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`Union[ShieldGemma2TextConfig, dict]`, *optional*): The config object of the text backbone. vision_config (`Union[AutoConfig, dict]`, *optional*): Custom vision config or dict. mm_tokens_per_image (`int`, *optional*, defaults to 256): The number of tokens per image embedding. boi_token_index (`int`, *optional*, defaults to 255999): The begin-of-image token index to wrap the image prompt. eoi_token_index (`int`, *optional*, defaults to 256000): The end-of-image token index to wrap the image prompt. image_token_index (`int`, *optional*, defaults to 262144): The image token index to encode the image prompt. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import ShieldGemma2ForConditionalGeneration, ShieldGemma2Config, SiglipVisionConfig, ShieldGemma2TextConfig >>> # Initializing a Siglip-like vision config >>> vision_config = SiglipVisionConfig() >>> # Initializing a ShieldGemma2 Text config >>> text_config = ShieldGemma2TextConfig() >>> # Initializing a ShieldGemma2 gemma-3-4b style configuration >>> configuration = ShieldGemma2Config(vision_config, text_config) >>> # Initializing a model from the gemma-3-4b style configuration >>> model = ShieldGemma2TextConfig(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "shieldgemma2" attribute_map = { "image_token_id": "image_token_index", "boi_token_id": "boi_token_index", "eoi_token_id": "eoi_token_index", } sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} def __init__( self, text_config=None, vision_config=None, mm_tokens_per_image: int = 256, boi_token_index: int = 255_999, eoi_token_index: int = 256_000, image_token_index: int = 262_144, initializer_range: float = 0.02, **kwargs, ): if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model") vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: vision_config = CONFIG_MAPPING["siglip_vision_model"]() self.vision_config = vision_config if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "gemma3_text") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["gemma3_text"]() self.text_config = text_config self.vision_config = vision_config self.mm_tokens_per_image = mm_tokens_per_image self.boi_token_index = boi_token_index self.eoi_token_index = eoi_token_index self.image_token_index = image_token_index self.initializer_range = initializer_range super().__init__(**kwargs) __all__ = ["ShieldGemma2Config"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
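The docstring example above appears to reference `ShieldGemma2ForConditionalGeneration` and `ShieldGemma2TextConfig`, which are not classes defined by this model. A hedged sketch that sticks to the classes actually involved (a Gemma3 text config and a SigLIP vision config as sub-configs, `ShieldGemma2ForImageClassification` as the model) might look like this:

```python
from transformers import (
    Gemma3TextConfig,
    ShieldGemma2Config,
    ShieldGemma2ForImageClassification,
    SiglipVisionConfig,
)

# Sub-configs may be passed as config objects or dicts; omitted ones fall back to defaults.
vision_config = SiglipVisionConfig()
text_config = Gemma3TextConfig()

configuration = ShieldGemma2Config(text_config=text_config, vision_config=vision_config)
print(configuration.mm_tokens_per_image)  # 256

# Randomly initialized model built from the configuration (no pretrained weights).
model = ShieldGemma2ForImageClassification(configuration)
```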
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert mLUKE checkpoint.""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size): # Load configuration defined in the metadata file with open(metadata_path) as metadata_file: metadata = json.load(metadata_file) config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"]) # Load in the weights from the checkpoint_path state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["module"] # Load the entity vocab file entity_vocab = load_original_entity_vocab(entity_vocab_path) # add an entry for [MASK2] entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1 config.entity_vocab_size += 1 tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"]) # Add special tokens to the token vocabulary for downstream tasks entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False) entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]}) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}") tokenizer.save_pretrained(pytorch_dump_folder_path) with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f: tokenizer_config = json.load(f) tokenizer_config["tokenizer_class"] = "MLukeTokenizer" with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f: json.dump(tokenizer_config, f) with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f: json.dump(entity_vocab, f) tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path) # Initialize the embeddings of the special tokens ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0] ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0] word_emb = state_dict["embeddings.word_embeddings.weight"] ent_emb = word_emb[ent_init_index].unsqueeze(0) ent2_emb = word_emb[ent2_init_index].unsqueeze(0) state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb]) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: decoder_bias = state_dict[bias_name] ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0) ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0) state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias]) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers): for matrix_name in ["query.weight", "query.bias"]: prefix = 
f"encoder.layer.{layer_index}.attention.self." state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"] entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0) state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb]) # add [MASK2] for 'entity_predictions.bias' entity_prediction_bias = state_dict["entity_predictions.bias"] entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0) state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias]) model = LukeForMaskedLM(config=config).eval() state_dict.pop("entity_predictions.decoder.weight") state_dict.pop("lm_head.decoder.weight") state_dict.pop("lm_head.decoder.bias") state_dict_for_hugging_face = OrderedDict() for key in state_dict: if not (key.startswith("lm_head") or key.startswith("entity_predictions")): state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key] else: state_dict_for_hugging_face[key] = state_dict[key] missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False) if set(unexpected_keys) != {"luke.embeddings.position_ids"}: raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}") if set(missing_keys) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"Unexpected missing_keys: {missing_keys}") model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification") text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and をフガニスタン (Afghanistan)." span = (0, 9) encoding = tokenizer(text, entity_spans=[span], return_tensors="pt") outputs = model(**encoding) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base expected_shape = torch.Size((1, 33, 768)) expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base expected_shape = torch.Size((1, 1, 768)) expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]]) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Verify masked word/entity prediction tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path) text = "Tokyo is the capital of <mask>." 
span = (24, 30) encoding = tokenizer(text, entity_spans=[span], return_tensors="pt") outputs = model(**encoding) input_ids = encoding["input_ids"][0].tolist() mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>")) predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1) assert "Japan" == tokenizer.decode(predicted_id) predicted_entity_id = outputs.entity_logits[0][0].argmax().item() multilingual_predicted_entities = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print(f"Saving PyTorch model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) def load_original_entity_vocab(entity_vocab_path): SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"] data = [json.loads(line) for line in open(entity_vocab_path)] new_mapping = {} for entry in data: entity_id = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: new_mapping[entity_name] = entity_id break new_entity_name = f"{language}:{entity_name}" new_mapping[new_entity_name] = entity_id return new_mapping if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) args = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
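For completeness, a sketch of calling the conversion function above directly from Python rather than via the CLI; every path below is a placeholder for files from the original mLUKE release, not a value prescribed by this script.

```python
from transformers.models.mluke.convert_mluke_original_pytorch_checkpoint_to_pytorch import (
    convert_luke_checkpoint,
)

# All paths are placeholders; point them at the original mLUKE checkpoint artifacts.
convert_luke_checkpoint(
    checkpoint_path="mluke_base/pytorch_model.bin",
    metadata_path="mluke_base/metadata.json",
    entity_vocab_path="mluke_base/entity_vocab.jsonl",
    pytorch_dump_folder_path="converted/mluke-base",
    model_size="base",
)
```

The equivalent command-line invocation passes the same values through the `argparse` flags defined in the `__main__` block above.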
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mluke/__init__.py
src/transformers/models/mluke/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .tokenization_mluke import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mluke/tokenization_mluke.py
src/transformers/models/mluke/tokenization_mluke.py
# coding=utf-8 # Copyright 2021 Studio Ousia and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """Tokenization classes for mLUKE.""" import itertools import json import os from collections.abc import Mapping from typing import Optional, Union import numpy as np from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers from tokenizers.models import Unigram from ...tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, PaddingStrategy, TensorType, TextInput, TextInputPair, TruncationStrategy, to_py_obj, ) from ...tokenization_utils_tokenizers import TokenizersBackend from ...utils import add_end_docstrings, is_torch_tensor, logging logger = logging.get_logger(__name__) EntitySpan = tuple[int, int] EntitySpanInput = list[EntitySpan] Entity = str EntityInput = list[Entity] SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "entity_vocab_file": "entity_vocab.json"} ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). 
[What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **entity_ids** -- List of entity ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **entity_position_ids** -- List of entity positions in the input sequence to be fed to a model. - **entity_token_type_ids** -- List of entity token type ids to be fed to a model (when `return_token_type_ids=True` or if *"entity_token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **entity_attention_mask** -- List of indices specifying which entities should be attended to by the model (when `return_attention_mask=True` or if *"entity_attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **entity_start_positions** -- List of the start positions of entities in the word token sequence (when `task="entity_span_classification"`). - **entity_end_positions** -- List of the end positions of entities in the word token sequence (when `task="entity_span_classification"`). - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`) """ class MLukeTokenizer(TokenizersBackend): """ Adapted from [`XLMRobertaTokenizer`] and [`LukeTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. entity_vocab_file (`str`): Path to the entity vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. task (`str`, *optional*): Task for which you want to prepare sequences. One of `"entity_classification"`, `"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the entity sequence is automatically created based on the given entity span(s). max_entity_length (`int`, *optional*, defaults to 32): The maximum length of `entity_ids`. max_mention_length (`int`, *optional*, defaults to 30): The maximum number of tokens inside an entity span. entity_token_1 (`str`, *optional*, defaults to `<ent>`): The special token used to represent an entity span in a word token sequence. This token is only used when `task` is set to `"entity_classification"` or `"entity_pair_classification"`. entity_token_2 (`str`, *optional*, defaults to `<ent2>`): The special token used to represent an entity span in a word token sequence. This token is only used when `task` is set to `"entity_pair_classification"`. additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", task=None, max_entity_length=32, max_mention_length=30, entity_token_1="<ent>", entity_token_2="<ent2>", entity_unk_token="[UNK]", entity_pad_token="[PAD]", entity_mask_token="[MASK]", entity_mask2_token="[MASK2]", vocab: Optional[Union[str, dict, list]] = None, entity_vocab: Optional[Union[str, dict, list]] = None, **kwargs, ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token # we add 2 special tokens for downstream tasks entity_token_1 = ( AddedToken(entity_token_1, lstrip=False, rstrip=False) if isinstance(entity_token_1, str) else entity_token_1 ) entity_token_2 = ( AddedToken(entity_token_2, lstrip=False, rstrip=False) if isinstance(entity_token_2, str) else entity_token_2 ) # Handle entity vocab file for backward compatibility entity_vocab_file = kwargs.pop("entity_vocab_file", None) # Check if vocab/entity_vocab are in kwargs if vocab is None and "vocab" in kwargs: vocab = kwargs.pop("vocab") if entity_vocab is None and "entity_vocab" in kwargs: entity_vocab = kwargs.pop("entity_vocab") # Build vocab from data (list of (token, score) tuples) if isinstance(vocab, list): # vocab is list of (token, score) tuples from SentencePieceExtractor self._vocab = [(token, float(score)) for token, score in vocab] self._vocab_size = len(self._vocab) elif vocab is not None: self._vocab = vocab self._vocab_size = 0 else: # Create minimal vocab with <unk> to satisfy Unigram requirements self._vocab = [("<unk>", 0.0)] self._vocab_size = 0 # Will be updated when real vocab is loaded # Build Unigram tokenizer self._tokenizer = Tokenizer(Unigram(self._vocab, unk_id=0)) # Add SentencePiece-style normalization and pre-tokenization self._tokenizer.normalizer = normalizers.Sequence( [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), ] ) self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="always") self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme="always") # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 tokens self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.fairseq_tokens_to_ids["<mask>"] = self._vocab_size + self.fairseq_offset self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} # Load entity vocab if entity_vocab is not None: self.entity_vocab = entity_vocab elif entity_vocab_file is not None: with open(entity_vocab_file, encoding="utf-8") as entity_vocab_handle: self.entity_vocab = json.load(entity_vocab_handle) else: # Create minimal entity vocab with required special tokens self.entity_vocab = { entity_unk_token: 0, entity_pad_token: 1, entity_mask_token: 2, entity_mask2_token: 3, } for entity_special_token in [entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token]: if entity_special_token not in self.entity_vocab: raise ValueError( f"Specified entity special token ``{entity_special_token}`` is not found in entity_vocab." 
) self.entity_unk_token_id = self.entity_vocab[entity_unk_token] self.entity_pad_token_id = self.entity_vocab[entity_pad_token] self.entity_mask_token_id = self.entity_vocab[entity_mask_token] self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token] self.task = task if task is None or task == "entity_span_classification": self.max_entity_length = max_entity_length elif task == "entity_classification": self.max_entity_length = 1 elif task == "entity_pair_classification": self.max_entity_length = 2 else: raise ValueError( f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification'," " 'entity_span_classification'] only." ) self.max_mention_length = max_mention_length # Handle extra/legacy special tokens (v4 compat). The fallback load path can pass # `additional_special_tokens` and/or `extra_special_tokens`, with entries serialized as dicts. extra_tokens: list[AddedToken | str] = [] for key in ("extra_special_tokens", "additional_special_tokens"): tokens = kwargs.pop(key, None) if isinstance(tokens, (list, tuple)): for token in tokens: extra_tokens.append(AddedToken(**token) if isinstance(token, dict) else token) # Ensure MLuke entity tokens are present exactly once. seen = {str(token) for token in extra_tokens} for token in (entity_token_1, entity_token_2): token_str = str(token) if token_str not in seen: extra_tokens.append(token) seen.add(token_str) # Also register entity masking/padding tokens so they survive save/load cycles. for token in (entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token): if token not in seen: extra_tokens.append(AddedToken(token, lstrip=False, rstrip=False, normalized=False, special=True)) seen.add(token) kwargs["extra_special_tokens"] = extra_tokens super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, task=task, max_entity_length=max_entity_length, max_mention_length=max_mention_length, entity_token_1=str(entity_token_1), entity_token_2=str(entity_token_2), entity_unk_token=entity_unk_token, entity_pad_token=entity_pad_token, entity_mask_token=entity_mask_token, entity_mask2_token=entity_mask2_token, entity_vocab=entity_vocab if entity_vocab_file is None else None, # Only store if passed as data **kwargs, ) # Call _post_init for tokenizers created directly (not from_pretrained) self._post_init() def _post_init(self): """ Post-initialization to configure the post-processor for MLuke's special token format. 
""" super()._post_init() # Ensure the Python-side vocab metadata matches the fast tokenizer backend after loading self._vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=False) self.fairseq_tokens_to_ids["<mask>"] = self._vocab_size + self.fairseq_offset self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} # Configure post processor for XLM-R/MLuke format: # single: <s> X </s> # pair: <s> A </s></s> B </s> from tokenizers import processors self._tokenizer.post_processor = processors.TemplateProcessing( single=f"{self.cls_token}:0 $A:0 {self.sep_token}:0", pair=f"{self.cls_token}:0 $A:0 {self.sep_token}:0 {self.sep_token}:0 $B:1 {self.sep_token}:1", special_tokens=[ (self.cls_token, self.cls_token_id), (self.sep_token, self.sep_token_id), ], ) @property def vocab_size(self): return self._vocab_size + self.fairseq_offset + 1 # Add the <mask> token def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] # Look up token in vocab token_id = self._tokenizer.token_to_id(token) # Need to return unknown token if not found (token_to_id returns None) return token_id + self.fairseq_offset if token_id is not None else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] token = self._tokenizer.id_to_token(index - self.fairseq_offset) return token if token is not None else self.unk_token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def num_special_tokens_to_add(self, pair: bool = False) -> int: """ Returns the number of added tokens when encoding a sequence with special tokens. Args: pair (`bool`, *optional*, defaults to `False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: `int`: Number of special tokens added to sequences. 
""" return 4 if pair else 2 @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, list[TextInput]], text_pair: Optional[Union[TextInput, list[TextInput]]] = None, entity_spans: Optional[Union[EntitySpanInput, list[EntitySpanInput]]] = None, entity_spans_pair: Optional[Union[EntitySpanInput, list[EntitySpanInput]]] = None, entities: Optional[Union[EntityInput, list[EntityInput]]] = None, entities_pair: Optional[Union[EntityInput, list[EntityInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, is_split_into_words: Optional[bool] = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Check for seq2seq parameters that are not supported with entity-aware encoding if kwargs.get("text_target") is not None or kwargs.get("text_pair_target") is not None: if entity_spans is not None or entities is not None or self.task is not None: raise NotImplementedError( "text_target and text_pair_target are not supported when using entity-aware encoding. " "Please use the tokenizer without entities for seq2seq tasks." ) # Delegate to parent for seq2seq encoding return super().__call__( text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings. text_pair (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings. entity_spans (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*): The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify `"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the constructor, the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each sequence must be equal to the length of each sequence of `entities`. 
entity_spans_pair (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*): The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify the `task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the length of each sequence must be equal to the length of each sequence of `entities_pair`. entities (`list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of each sequence must be equal to the length of each sequence of `entity_spans`. If you specify `entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity. entities_pair (`list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify `entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity. max_entity_length (`int`, *optional*): The maximum length of `entity_ids`. 
""" # Input type checking for clearer error is_valid_single_text = isinstance(text, str) is_valid_batch_text = isinstance(text, (list, tuple)) and ( len(text) == 0 or isinstance(text[0], (str, list, tuple)) ) if not (is_valid_single_text or is_valid_batch_text): raise ValueError("text input must be of type `str` (single example) or `list[str]` (batch).") is_valid_single_text_pair = isinstance(text_pair, str) is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and ( len(text_pair) == 0 or isinstance(text_pair[0], str) ) if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair): raise ValueError("text_pair input must be of type `str` (single example) or `list[str]` (batch).") is_batched = bool(isinstance(text, (list, tuple))) # Get proper padding and truncation strategies padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) if is_batched: batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text if entities is None: batch_entities_or_entities_pairs = None else: batch_entities_or_entities_pairs = ( list(zip(entities, entities_pair)) if entities_pair is not None else entities ) if entity_spans is None: batch_entity_spans_or_entity_spans_pairs = None else: batch_entity_spans_or_entity_spans_pairs = ( list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs, batch_entities_or_entities_pairs=batch_entities_or_entities_pairs, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self._encode_plus( text=text, text_pair=text_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, entities=entities, entities_pair=entities_pair, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus(
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
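A short usage sketch for the entity-aware encoding path described in the `__call__` docstring above. The checkpoint name `studio-ousia/mluke-base` is an assumption for illustration; the character span covers "Tokyo", and since no `entities` are passed the entity sequence is filled with the `[MASK]` entity as documented.

```python
from transformers import MLukeTokenizer

# Assumed checkpoint name, shown for illustration only.
tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")

text = "Tokyo is the capital of Japan."
entity_spans = [(0, 5)]  # character-based span covering "Tokyo"

encoding = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
print(encoding["input_ids"].shape)
print(encoding["entity_ids"])  # defaults to the [MASK] entity when `entities` is not given
```

Constructing the tokenizer with `task="entity_classification"` or `task="entity_pair_classification"` instead constrains `max_entity_length` to 1 or 2, per the `__init__` above.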
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/glm4v_moe/configuration_glm4v_moe.py
src/transformers/models/glm4v_moe/configuration_glm4v_moe.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/glm4v_moe/modular_glm4v_moe.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_glm4v_moe.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from ...configuration_utils import PreTrainedConfig from ...modeling_rope_utils import RopeParameters class Glm4vMoeVisionConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Glm4vMoeVisionModel`]. It is used to instantiate an Glm4vMoeVisionModel model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking). Args: depth (`int`, *optional*, defaults to 24): Number of layers (depth) in the model. hidden_size (`int`, *optional*, defaults to 1536): Dimensionality of the encoder layers and the pooler layer. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. attention_bias (`bool`, *optional*, defaults to `False`): Whether to add a bias to the queries, keys and values. attention_dropout (`float`, *optional*, defaults to 0.0): Dropout probability for attention weights. num_heads (`<fill_type>`, *optional*, defaults to 12): <fill_docstring> in_channels (`<fill_type>`, *optional*, defaults to 3): <fill_docstring> image_size (`int` or `list[int]`, *optional*, defaults to 336): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. spatial_merge_size (`int`, *optional*, defaults to 2): The size used for merging spatial dimensions. temporal_patch_size (`int`, *optional*, defaults to 2): The size used for patches along the temporal dimension. out_hidden_size (`int`, *optional*, defaults to 4096): The output hidden size of the vision model. intermediate_size (`int`, *optional*, defaults to 13696): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import Glm4vMoeVisionConfig, Glm4vMoeVisionModel >>> # Initializing a Glm4vMoeVisionConfig GLM-4.1V-9B style configuration >>> configuration = Glm4vMoeVisionConfig() >>> # Initializing a model (with random weights) from the GLM-4.1V-9B configuration >>> model = Glm4vMoeVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "glm4v_moe_vision" base_config_key = "vision_config" def __init__( self, depth=24, hidden_size=1536, hidden_act="silu", attention_bias=False, attention_dropout=0.0, num_heads=12, in_channels=3, image_size=336, patch_size=14, rms_norm_eps=1e-05, spatial_merge_size=2, temporal_patch_size=2, out_hidden_size=4096, intermediate_size=13696, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.depth = depth self.hidden_size = hidden_size self.hidden_act = hidden_act self.num_heads = num_heads self.in_channels = in_channels self.image_size = image_size self.patch_size = patch_size self.spatial_merge_size = spatial_merge_size self.temporal_patch_size = temporal_patch_size self.out_hidden_size = out_hidden_size self.intermediate_size = intermediate_size self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.attention_bias = attention_bias self.attention_dropout = attention_dropout class Glm4vMoeTextConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Glm4vMoeModel`]. It is used to instantiate a GLM-4.5V model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of GLM-4.5V [zai-org/GLM-4.5V](https://huggingface.co/zai-org/GLM-4.5V). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151424): Vocabulary size of the Glm4vMoe model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Glm4vMoeModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 10944): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 46): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 96): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 65536): The maximum sequence length that this model might ever be used with. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, *optional*, defaults to `True`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. moe_intermediate_size (`int`, *optional*, defaults to 1408): Intermediate size of the routed expert. num_experts_per_tok (`int`, *optional*, defaults to 8): Number of experts selected per token. n_shared_experts (`int`, *optional*, defaults to 1): Number of shared experts. n_routed_experts (`int`, *optional*, defaults to 128): Number of routed experts. routed_scaling_factor (`float`, *optional*, defaults to 1.0): Scaling factor for routed experts. n_group (`int`, *optional*, defaults to 1): Number of groups for routed experts. topk_group (`int`, *optional*, defaults to 1): Number of selected groups for each token (for each token, the selected experts are restricted to `topk_group` groups). first_k_dense_replace (`int`, *optional*, defaults to 1): Number of dense layers in the shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head). \--k dense layers--/ norm_topk_prob (`bool`, *optional*, defaults to `True`): Whether to normalize the topk probabilities. router_aux_loss_coef (`float`, *optional*, defaults to 0.0001): The scaling factor for the auxiliary router loss.
```python >>> from transformers import Glm4vMoeTextModel, Glm4vMoeConfig >>> # Initializing a GLM-4.5V style configuration >>> configuration = Glm4vMoeConfig() >>> # Initializing a model from the GLM-4.5V style configuration >>> model = Glm4vMoeTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "glm4v_moe_text" keys_to_ignore_at_inference = ["past_key_values"] # Default tensor parallel plan for base model `Glm4vMoe` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } attribute_map = { "num_local_experts": "n_routed_experts", } base_config_key = "text_config" def __init__( self, vocab_size: Optional[int] = 151424, hidden_size: Optional[int] = 4096, intermediate_size: Optional[int] = 10944, num_hidden_layers: Optional[int] = 46, num_attention_heads: Optional[int] = 96, num_key_value_heads: Optional[int] = 8, hidden_act: Optional[str] = "silu", max_position_embeddings: Optional[int] = 65536, initializer_range: Optional[float] = 0.02, rms_norm_eps: Optional[int] = 1e-5, use_cache: Optional[bool] = True, tie_word_embeddings: Optional[bool] = False, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, attention_bias: Optional[bool] = True, attention_dropout: Optional[float] = 0.0, moe_intermediate_size: Optional[int] = 1408, num_experts_per_tok: Optional[int] = 8, n_shared_experts: Optional[int] = 1, n_routed_experts: Optional[int] = 128, routed_scaling_factor: Optional[float] = 1.0, n_group: Optional[int] = 1, topk_group: Optional[int] = 1, first_k_dense_replace: Optional[int] = 1, norm_topk_prob: Optional[bool] = True, router_aux_loss_coef: Optional[float] = 0.0001, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.rope_parameters = rope_parameters kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC # MoE arguments self.moe_intermediate_size = moe_intermediate_size self.num_experts_per_tok = num_experts_per_tok self.n_group = n_group self.topk_group = topk_group self.n_shared_experts = n_shared_experts self.n_routed_experts = n_routed_experts self.routed_scaling_factor = routed_scaling_factor self.first_k_dense_replace = first_k_dense_replace self.norm_topk_prob = norm_topk_prob self.router_aux_loss_coef = router_aux_loss_coef super().__init__( tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs ) class Glm4vMoeConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Glm4vMoeModel`]. It is used to instantiate a GLM-4.5V model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of GLM-4.5V [zai-org/GLM-4.5V](https://huggingface.co/zai-org/GLM-4.5V). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vMoeTextConfig`): The config object or dictionary of the text backbone. vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vMoeVisionConfig`): The config object or dictionary of the vision backbone. image_token_id (`int`, *optional*, defaults to 151363): The image token index to encode the image prompt. video_token_id (`int`, *optional*, defaults to 151364): The video token index to encode the video prompt. image_start_token_id (`int`, *optional*, defaults to 151339): The image start token index to encode the start of image. image_end_token_id (`int`, *optional*, defaults to 151340): The image end token index to encode the end of image. video_start_token_id (`int`, *optional*, defaults to 151341): The video start token index to encode the start of video. video_end_token_id (`int`, *optional*, defaults to 151342): The video end token index to encode the end of video. ```python >>> from transformers import Glm4vMoeForConditionalGeneration, Glm4vMoeConfig >>> # Initializing a GLM-4.5V style configuration >>> configuration = Glm4vMoeConfig() >>> # Initializing a model from the GLM-4.5V style configuration >>> model = Glm4vMoeForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "glm4v_moe" sub_configs = {"vision_config": Glm4vMoeVisionConfig, "text_config": Glm4vMoeTextConfig} keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, text_config=None, vision_config=None, image_token_id=151363, video_token_id=151364, image_start_token_id=151339, image_end_token_id=151340, video_start_token_id=151341, video_end_token_id=151342, **kwargs, ): if isinstance(vision_config, dict): self.vision_config = self.sub_configs["vision_config"](**vision_config) elif vision_config is None: self.vision_config = self.sub_configs["vision_config"]() if isinstance(text_config, dict): self.text_config = self.sub_configs["text_config"](**text_config) elif text_config is None: self.text_config = self.sub_configs["text_config"](**kwargs) self.image_token_id = image_token_id self.video_token_id = video_token_id self.video_start_token_id = video_start_token_id self.video_end_token_id = video_end_token_id self.image_start_token_id = image_start_token_id self.image_end_token_id = image_end_token_id super().__init__(**kwargs) __all__ = ["Glm4vMoeConfig", "Glm4vMoeTextConfig", "Glm4vMoeVisionConfig"]
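# Illustrative usage sketch (not part of the original module; the values below are placeholders
# chosen only to show the composition): the top-level `Glm4vMoeConfig` accepts plain dicts for
# its sub-configs and turns them into `Glm4vMoeTextConfig` / `Glm4vMoeVisionConfig` instances,
# as the `__init__` above shows.
#
#     from transformers import Glm4vMoeConfig
#
#     config = Glm4vMoeConfig(
#         text_config={"num_hidden_layers": 2, "num_attention_heads": 8, "num_key_value_heads": 4},
#         vision_config={"depth": 2, "hidden_size": 1536},
#     )
#     assert config.text_config.num_hidden_layers == 2
#     assert config.vision_config.depth == 2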
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/glm4v_moe/modeling_glm4v_moe.py
src/transformers/models/glm4v_moe/modeling_glm4v_moe.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/glm4v_moe/modular_glm4v_moe.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_glm4v_moe.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from collections.abc import Callable from dataclasses import dataclass from typing import Any, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import LayerNorm from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...integrations import use_kernel_forward_from_hub, use_kernelized_func from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ModelOutput, MoeModelOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling from ...utils.generic import OutputRecorder, check_model_inputs, maybe_autocast from .configuration_glm4v_moe import Glm4vMoeConfig, Glm4vMoeTextConfig, Glm4vMoeVisionConfig @use_kernel_forward_from_hub("RMSNorm") class Glm4vMoeRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Glm4vMoeRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" @dataclass @auto_docstring( custom_intro=""" Base class for Llava outputs, with hidden states and attentions. """ ) class Glm4vMoeModelOutputWithPast(ModelOutput): r""" past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None class Glm4vMoeTextRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: Glm4vMoeTextConfig, device=None, layer_type=None): super().__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False) @staticmethod def compute_default_rope_parameters( config: Optional[Glm4vMoeTextConfig] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). """ base = config.rope_parameters["rope_theta"] partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0) head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads dim = int(head_dim * partial_rotary_factor) attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): # In contrast to other models, GLM4V_MOE different position ids for the grids # So we expand the inv_freq to shape (3, ...) inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1) position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions) device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with maybe_autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) # Interleave them instead of usual shape cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1) sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1) # Keep half or full tensor for later concatenation rotary_dim = cos.shape[-1] q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] # Apply rotary embeddings on the first half or full tensor q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin) k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin) # Concatenate back to full shape q_embed = torch.cat([q_embed, q_pass], dim=-1) k_embed = torch.cat([k_embed, k_pass], dim=-1) return q_embed, k_embed def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1): """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/). Explanation: Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately. Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding. For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal, height and width) of text embedding is always the same, so the text embedding rotary position embedding has no difference with modern LLMs. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. mrope_section(`List(int)`): Multimodal rope section is for channel dimension of temporal, height and width in rope calculation. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" mrope_section = mrope_section * 2 cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) # Keep half or full tensor for later concatenation rotary_dim = cos.shape[-1] q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] # Apply rotary embeddings on the first half or full tensor q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin) k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin) # Concatenate back to full shape q_embed = torch.cat([q_embed, q_pass], dim=-1) k_embed = torch.cat([k_embed, k_pass], dim=-1) return q_embed, k_embed @use_kernelized_func(apply_rotary_pos_emb) class Glm4vMoeTextAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Glm4vMoeTextConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.rope_parameters = config.rope_parameters def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape) key_states = self.k_proj(hidden_states).view(hidden_shape) value_states = self.v_proj(hidden_states).view(hidden_shape) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_multimodal_rotary_pos_emb( # diff with Llama query_states, key_states, cos, sin, self.rope_parameters["mrope_section"] ) if past_key_values is not None: # sin and cos are specific to RoPE models; position_ids needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, 
-1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Glm4vMoeTextTopkRouter(nn.Module): def __init__(self, config: Glm4vMoeTextConfig): super().__init__() self.config = config self.top_k = config.num_experts_per_tok self.n_routed_experts = config.n_routed_experts self.routed_scaling_factor = config.routed_scaling_factor self.n_group = config.n_group self.topk_group = config.topk_group self.norm_topk_prob = config.norm_topk_prob self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size))) self.register_buffer("e_score_correction_bias", torch.zeros((self.n_routed_experts), dtype=torch.float32)) def forward(self, hidden_states): hidden_states = hidden_states.view(-1, self.config.hidden_size) router_logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32)) return router_logits class Glm4vMoeTextNaiveMoe(nn.Module): """Collection of expert weights stored as 3D tensors.""" def __init__(self, config): super().__init__() self.num_experts = config.num_local_experts self.hidden_dim = config.hidden_size self.intermediate_dim = config.moe_intermediate_size self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim)) self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim)) self.act_fn = ACT2FN[config.hidden_act] def forward( self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor, ) -> torch.Tensor: final_hidden_states = torch.zeros_like(hidden_states) with torch.no_grad(): expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts) expert_mask = expert_mask.permute(2, 1, 0) expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() for expert_idx in expert_hit: expert_idx = expert_idx[0] if expert_idx == self.num_experts: continue top_k_pos, token_idx = torch.where(expert_mask[expert_idx]) current_state = hidden_states[token_idx] gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1) current_hidden_states = self.act_fn(gate) * up current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx]) current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None] final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype)) return final_hidden_states class Glm4vMoeTextMoE(nn.Module): """ A mixed expert module containing shared experts. 
""" def __init__(self, config: Glm4vMoeTextConfig): super().__init__() self.config = config self.experts = Glm4vMoeTextNaiveMoe(config) self.gate = Glm4vMoeTextTopkRouter(config) self.shared_experts = Glm4vMoeTextMLP( config=config, intermediate_size=config.moe_intermediate_size * config.n_shared_experts ) self.n_routed_experts = config.n_routed_experts self.n_group = config.n_group self.topk_group = config.topk_group self.norm_topk_prob = config.norm_topk_prob self.routed_scaling_factor = config.routed_scaling_factor self.top_k = config.num_experts_per_tok def route_tokens_to_experts(self, router_logits): router_logits = router_logits.sigmoid() router_logits_for_choice = router_logits + self.gate.e_score_correction_bias group_scores = ( router_logits_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group) .topk(2, dim=-1)[0] .sum(dim=-1) ) group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1] group_mask = torch.zeros_like(group_scores) group_mask.scatter_(1, group_idx, 1) score_mask = ( group_mask.unsqueeze(-1) .expand(-1, self.n_group, self.n_routed_experts // self.n_group) .reshape(-1, self.n_routed_experts) ) scores_for_choice = router_logits_for_choice.masked_fill(~score_mask.bool(), 0.0) topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1] topk_weights = router_logits.gather(1, topk_indices) if self.norm_topk_prob: denominator = topk_weights.sum(dim=-1, keepdim=True) + 1e-20 topk_weights /= denominator topk_weights = topk_weights * self.routed_scaling_factor return topk_indices, topk_weights def forward(self, hidden_states): residuals = hidden_states orig_shape = hidden_states.shape router_logits = self.gate(hidden_states) topk_indices, topk_weights = self.route_tokens_to_experts(router_logits) hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape) hidden_states = hidden_states + self.shared_experts(residuals) return hidden_states class Glm4vMoeTextMLP(nn.Module): def __init__(self, config, intermediate_size=None): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj @use_kernel_forward_from_hub("RMSNorm") class Glm4vMoeTextRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Glm4vMoeTextRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class Glm4vMoeTextDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Glm4vMoeTextConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size 
self.self_attn = Glm4vMoeTextAttention(config=config, layer_idx=layer_idx) if layer_idx >= config.first_k_dense_replace: self.mlp = Glm4vMoeTextMoE(config) else: self.mlp = Glm4vMoeTextMLP(config) self.input_layernorm = Glm4vMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Glm4vMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring class Glm4vMoePreTrainedModel(PreTrainedModel): config: Glm4vMoeConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Glm4vMoeTextDecoderLayer", "Glm4vMoeVisionBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = False _supports_attention_backend = True _can_record_outputs = { "hidden_states": Glm4vMoeTextDecoderLayer, "attentions": Glm4vMoeTextAttention, "router_logits": OutputRecorder(nn.Linear, layer_name="mlp.gate", index=0), } _keep_in_fp32_modules_strict = ["e_score_correction_bias"] input_modalities = ("text", "image", "video") @torch.no_grad() def _init_weights(self, module): super()._init_weights(module) if isinstance(module, Glm4vMoeTextTopkRouter): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) init.zeros_(module.e_score_correction_bias) elif isinstance(module, Glm4vMoeTextNaiveMoe): init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range) init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range) if isinstance(module, Glm4vMoeVisionRotaryEmbedding): inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim)) init.copy_(module.inv_freq, inv_freq) @dataclass @auto_docstring( custom_intro=""" Base class for Glm4vMoe causal language model (or autoregressive) outputs. """ ) class Glm4vMoeCausalLMOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). 
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None aux_loss: Optional[torch.FloatTensor] = None class Glm4vMoeVisionRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim: int, theta: float = 10000.0) -> None: super().__init__() self.dim = dim self.theta = theta inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) def forward(self, seqlen: int) -> torch.Tensor: seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype) freqs = torch.outer(seq, self.inv_freq) return freqs class Glm4vMoeisionMlp(nn.Module): def __init__(self, config, bias: bool = False): super().__init__() self.hidden_size = config.hidden_size self.intermediate_size = config.out_hidden_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_state): return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state)) class Glm4vMoeVisionPatchEmbed(nn.Module): def __init__(self, config: Glm4vMoeVisionConfig) -> None: super().__init__() self.patch_size = config.patch_size self.temporal_patch_size = config.temporal_patch_size self.in_channels = config.in_channels self.embed_dim = config.hidden_size kernel_size = [self.temporal_patch_size, self.patch_size, self.patch_size] self.proj = nn.Conv3d(self.in_channels, self.embed_dim, kernel_size=kernel_size, stride=kernel_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: target_dtype = self.proj.weight.dtype hidden_states = hidden_states.view( -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size ) hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim) return hidden_states class Glm4vMoeVisionPatchMerger(nn.Module): def __init__(self, dim: int, context_dim: int, hidden_act: str, bias: bool = False) -> None: super().__init__() self.proj = nn.Linear(dim, dim, bias=bias) self.post_projection_norm = LayerNorm(dim) self.gate_proj = nn.Linear(dim, context_dim, bias=bias) self.up_proj = nn.Linear(dim, context_dim, bias=bias)
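# Note on the patch embedding defined above: `Glm4vMoeVisionPatchEmbed.forward` expects patches
# that were already extracted and flattened to
# (num_patches, in_channels * temporal_patch_size * patch_size * patch_size); with the default
# vision config (3, 2, 14, 14) that is 3 * 2 * 14 * 14 = 1176 features per patch, which the
# Conv3d (kernel size == stride == patch shape) projects to a single `hidden_size`-dim vector per patch.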
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/glm4v_moe/__init__.py
src/transformers/models/glm4v_moe/__init__.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_glm4v_moe import * from .modeling_glm4v_moe import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
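# Illustrative sketch (not part of the original file): with the `_LazyModule` indirection above,
# the submodules are only imported when one of their attributes is first accessed, e.g.:
#
#     from transformers.models import glm4v_moe
#
#     cfg_cls = glm4v_moe.Glm4vMoeConfig  # this attribute access triggers the real import
#     config = cfg_cls()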
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/glm4v_moe/convert_glm4v_moe_mgt_weights_to_hf.py
src/transformers/models/glm4v_moe/convert_glm4v_moe_mgt_weights_to_hf.py
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import pickle import re from pathlib import Path import torch from safetensors.torch import save_file # Avoid Using Megatron Lib class UnpicklerWrapper(pickle.Unpickler): def find_class(self, mod_name, name): class DummyClass: def __init__(self, *args, **kwargs): pass if mod_name.startswith("megatron") or mod_name.startswith("glm") or mod_name.startswith("__main__"): return DummyClass return super().find_class(mod_name, name) pickle.Unpickler = UnpicklerWrapper def dict_access_multi(a_dict, keys): if len(keys) == 0: return a_dict return dict_access_multi(a_dict[keys[0]], keys[1:]) def merge_qkv( sd_list, original_tp, num_attention_heads, multi_query_group_num, attention_dim, interleaved_qkv, ): group_size = (num_attention_heads // multi_query_group_num + 2) * attention_dim q, k, v = [], [], [] for sd in sd_list: if interleaved_qkv: shape = sd.shape q_, k_, v_ = sd.view((multi_query_group_num // original_tp, group_size) + (shape[1:])).split( [ (num_attention_heads // multi_query_group_num * attention_dim), attention_dim, attention_dim, ], dim=1, ) q_ = q_.reshape((-1,) + (shape[1:])) k_ = k_.reshape((-1,) + (shape[1:])) v_ = v_.reshape((-1,) + (shape[1:])) else: q_, k_, v_ = sd.split( [ num_attention_heads * attention_dim // original_tp, multi_query_group_num * attention_dim // original_tp, multi_query_group_num * attention_dim // original_tp, ], dim=0, ) q.append(q_.clone()) k.append(k_.clone()) v.append(v_.clone()) q = torch.cat(q, dim=0) k = torch.cat(k, dim=0) v = torch.cat(v, dim=0) return q, k, v def merge_glu(sd_list): return torch.cat( [sd.chunk(dim=0, chunks=2)[0].clone() for sd in sd_list] + [sd.chunk(dim=0, chunks=2)[1].clone() for sd in sd_list], dim=0, ) def merge_glu_vit(sd_list, original_tp=None): if not isinstance(sd_list, list): sd_list = [sd_list] gate_proj = torch.cat([sd.chunk(dim=0, chunks=2)[0].clone() for sd in sd_list], dim=0) up_proj = torch.cat([sd.chunk(dim=0, chunks=2)[1].clone() for sd in sd_list], dim=0) return gate_proj, up_proj def split_glu(sd, cnt, idx): return torch.cat( ( sd.chunk(dim=0, chunks=2)[0].chunk(cnt, dim=0)[idx].clone(), sd.chunk(dim=0, chunks=2)[1].chunk(cnt, dim=0)[idx].clone(), ), dim=0, ) def find_expert_weight(input_dict, layer_num, fc1=True): if fc1: pattern = re.compile(rf"^decoder\.layers\.{layer_num}\.mlp\.experts\.linear_fc1\.weight(\d+)$") else: pattern = re.compile(rf"^decoder\.layers\.{layer_num}\.mlp\.experts\.linear_fc2\.weight(\d+)$") matched = [] for key in input_dict: match = pattern.match(key) if match: weight_num = int(match.group(1)) matched.append((weight_num, key)) matched.sort(key=lambda x: x[0]) weights = [None for _ in range(len(matched) * len(input_dict[matched[0][1]]))] for idx, key in matched: for i, weight in enumerate(input_dict[key]): weights[i * len(matched) + idx] = weight return weights def merge_tensors( tp_sd, keys, original_tp, target_tp, current_tp, slice_dim=None, merge_fn=None, 
): cnt = original_tp // target_tp offset = cnt * current_tp sd_list = [dict_access_multi(tp_sd[i + offset], keys) for i in range(cnt)] if slice_dim is not None: return torch.cat(sd_list, dim=slice_dim) assert merge_fn is not None return merge_fn(sd_list) def save_sharded_model(state_dict, output_path, max_shard_size_gb=5, num_layers=46, vision_num_layers=24): os.makedirs(output_path, exist_ok=True) layered_dict = {} for layer_idx in range(num_layers): layer_key = f"layer_{layer_idx}" layered_dict[layer_key] = {} for key, value in state_dict.items(): if f"model.language_model.layers.{layer_idx}." in key: if isinstance(value, list): assert len(value) == 1, f"{key} {value}" value = value[0] layered_dict[layer_key][key] = value for layer_idx in range(vision_num_layers): layer_key = f"visual_layer_{layer_idx}" layered_dict[layer_key] = {} for key, value in state_dict.items(): if f"model.visual.blocks.{layer_idx}." in key: layered_dict[layer_key][key] = value layered_dict["others"] = {} for key, value in state_dict.items(): if not any(f"model.language_model.layers.{i}." in key for i in range(num_layers)) and not any( f"model.visual.blocks.{i}." in key for i in range(vision_num_layers) ): layered_dict["others"][key] = value # Determine layer ordering layer_order = [] for i in range(num_layers): layer_order.append(f"layer_{i}") for i in range(vision_num_layers): layer_order.append(f"visual_layer_{i}") layer_order.append("others") # Calculate sizes and create shards by layer param_sizes = {} shards = [] current_shard = {} current_shard_size = 0 max_shard_size_bytes = max_shard_size_gb * 1024 * 1024 * 1024 for layer_key in layer_order: layer_weights = layered_dict[layer_key] layer_size = sum(param.numel() * param.element_size() for param in layer_weights.values()) if current_shard_size + layer_size > max_shard_size_bytes and current_shard: shards.append(current_shard) current_shard = {} current_shard_size = 0 for param_name, param in layer_weights.items(): current_shard[param_name] = param current_shard_size += param.numel() * param.element_size() param_sizes[param_name] = param.numel() * param.element_size() if current_shard: shards.append(current_shard) index_dict = {"metadata": {"total_size": sum(param_sizes.values())}, "weight_map": {}} for i, shard in enumerate(shards): shard_filename = f"model-{i + 1:05d}-of-{len(shards):05d}.safetensors" shard_path = os.path.join(output_path, shard_filename) for param_name in shard: index_dict["weight_map"][param_name] = shard_filename save_file(shard, shard_path, metadata={"format": "pt"}) print(f"Saved shard {i + 1}/{len(shards)}: {shard_filename}") print(f" Shard size: {sum(p.numel() * p.element_size() for p in shard.values()) / (1024**3):.2f} GB") print(f" Keys in shard: {len(shard)}") index_path = os.path.join(output_path, "model.safetensors.index.json") with open(index_path, "w") as f: json.dump(index_dict, f, indent=2) return len(shards) def merge_tp_weights(model_path, output_path, vllm_config_path=None): origin_tp, origin_ep, origin_pp = -1, -1, -1 check_ep_or_pp_later = False for item in Path(model_path).iterdir(): if item.is_dir(): match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?", item.name) if match: groups = match.groups() tp = int(groups[0]) origin_tp = max(origin_tp, tp + 1) # maybe TP-EP or TP-PP, need check later if groups[1] is not None and groups[2] is None: pp = int(groups[1]) origin_pp = max(origin_pp, pp + 1) origin_ep = 1 check_ep_or_pp_later = True elif groups[1] is not None and groups[2] is not None: pp = int(groups[1]) 
ep = int(groups[2]) origin_pp = max(origin_pp, pp + 1) origin_ep = max(origin_ep, ep + 1) else: origin_ep = 1 origin_pp = 1 tensor_names_by_file = {} mgt_sd = {} for item in Path(model_path).iterdir(): if item.is_dir(): match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?$", item.name) if match: groups = match.groups() tp = int(groups[0]) pp = int(groups[1]) if groups[1] is not None else 0 ep = int(groups[2]) if groups[2] is not None else 0 file_path = item / "model_optim_rng.pt" assert file_path.exists(), f"model_optim_rng.pt not found in {item}" file_sd = torch.load(file_path, map_location="cpu", weights_only=False) for k in list(file_sd.keys()): if "_extra_state" in k or "dummy_parameter" in k: file_sd.pop(k) mgt_sd[(tp, pp, ep)] = file_sd tensor_names = set() if "model" in file_sd: for key in file_sd["model"].keys(): tensor_names.add(key) tensor_names_by_file[(tp, pp, ep)] = tensor_names change_pp_to_ep = False if check_ep_or_pp_later: prefix_distribution = {} for (tp, pp, ep), prefixes in tensor_names_by_file.items(): for prefix in prefixes: if prefix not in prefix_distribution: prefix_distribution[prefix] = set() prefix_distribution[prefix].add((tp, pp, ep)) for prefix, locations in prefix_distribution.items(): if len(locations) > 1: pp_values = {loc[1] for loc in locations} if len(pp_values) > 1: print(f"find '{prefix}' in multi ranks {pp_values} the parallelism should be TP-EP") origin_ep = origin_pp origin_pp = 1 change_pp_to_ep = True break else: print(f"find '{prefix}' only in one ep, parallelism should be TP-PP") break print(f"Detected tensor parallel degree TP={origin_tp} EP={origin_ep} PP={origin_pp}") if origin_tp <= 1 and origin_ep <= 1 and origin_pp <= 1: print("Model is already at TP=1 EP=1 PP=1, no need to merge") return assert max(origin_tp, origin_ep) * origin_pp == len(tensor_names_by_file), "maybe some problem in origin weight" organized_sd = {} for (tp, pp, ep), file_sd in mgt_sd.items(): if change_pp_to_ep: pp, ep = ep, pp organized_sd.setdefault(pp, {}) organized_sd[pp][(ep, tp)] = file_sd find_vpp = "model0" in file_sd # support VPP, if each pp rank has n vpp blocks, we will treat the original model # was parallel as pp n * origin_pp if find_vpp: organized_sd_vpp = {} for i in range(origin_pp): for (ep, tp), file_sd in organized_sd[i].items(): model_keys = sorted( [key for key in file_sd.keys() if key.startswith("model") and key[5:].isdigit()], key=lambda x: int(x[5:]), ) vp_blocks = len(model_keys) for idx, key in enumerate(model_keys): assert key in file_sd, f"model {key} not found" organized_sd_vpp.setdefault(idx * origin_pp + i, {}) organized_sd_vpp[idx * origin_pp + i][(ep, tp)] = {"model": file_sd[key]} origin_pp = origin_pp * vp_blocks organized_sd = organized_sd_vpp ignore_list = ["_extra_state", "dummy_parameter"] layer_share_list = [ "norm", "conv3d", "downsample", "router", "mlp.linear_fc2.bias", "self_attention.linear_proj.bias", "position_embeddings", ] full_weights = {} vit_layer_offset = 0 llm_layer_offset = 0 llm_layer_pattern = re.compile(r"^(decoder\.layers\.)(\d+)(\..*)$") vit_layer_pattern = re.compile(r"^(vision_model\.transformer\.layers\.)(\d+)(\..*)$") for pp in sorted(organized_sd.keys()): pp_dict = organized_sd[pp] next_llm_layer_offset = llm_layer_offset next_vit_layer_offset = vit_layer_offset ep_map = {} tp_map = {} tp_seen = set() for (ep, tp), item in pp_dict.items(): if tp not in tp_seen: tp_seen.add(tp) tp_map[tp] = item ep_map[ep] = item for tp in sorted(tp_map.keys()): sd = tp_map[tp] for full_name, tensor in 
sd["model"].items(): if any(x in full_name for x in ignore_list): continue llm_name_match = llm_layer_pattern.match(full_name) if llm_name_match: # Use a closure to avoid global variable issues def offset_layer(x, offset=llm_layer_offset): nonlocal next_llm_layer_offset _real_layer = int(x.group(2)) + offset next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1) return f"{x.group(1)}{_real_layer}{x.group(3)}" full_name = llm_layer_pattern.sub(offset_layer, full_name) vit_name_match = vit_layer_pattern.match(full_name) if vit_name_match: # Use a closure to avoid global variable issues def offset_layer(x, offset=vit_layer_offset): nonlocal next_vit_layer_offset _real_layer = int(x.group(2)) + offset next_vit_layer_offset = max(next_vit_layer_offset, _real_layer + 1) return f"{x.group(1)}{_real_layer}{x.group(3)}" full_name = vit_layer_pattern.sub(offset_layer, full_name) if layer_share_list and any(x in full_name for x in layer_share_list): if full_name not in full_weights: full_weights[full_name] = tensor else: assert torch.equal(tensor, full_weights[full_name]), ( f"detect diff param in tp named: {full_name}" ) elif not re.search(r"\.experts\.", full_name): full_weights.setdefault(full_name, [None for _ in range(origin_tp)]) full_weights[full_name][tp] = tensor for ep in sorted(ep_map.keys()): sd = ep_map[ep] for full_name, tensor in sd["model"].items(): if any(x in full_name for x in ignore_list): continue name_match = llm_layer_pattern.match(full_name) if name_match: # Use a closure to avoid global variable issues def offset_layer(x, offset=llm_layer_offset): nonlocal next_llm_layer_offset _real_layer = int(x.group(2)) + offset next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1) return f"{x.group(1)}{_real_layer}{x.group(3)}" full_name = llm_layer_pattern.sub(offset_layer, full_name) if re.search(r"\.experts\.", full_name): full_weights.setdefault(full_name, [None for _ in range(origin_ep)]) full_weights[full_name][ep] = tensor llm_layer_offset = next_llm_layer_offset vit_layer_offset = next_vit_layer_offset for k in sorted(full_weights.keys()): item = full_weights[k] if isinstance(item, list): print(f"{k} {len(item)} {item[0].shape} {item[0].dtype}", flush=True) else: print(f"{k} {item.shape} {item.dtype}", flush=True) print(f"Loading vLLM configuration file: {vllm_config_path}") with open(vllm_config_path, "r") as f: model_config = json.load(f) print(model_config) text_config = model_config.get("text_config", {}) vision_config = model_config.get("vision_config", {}) num_layers = text_config.get("num_hidden_layers", 46) llm_num_heads = text_config.get("num_attention_heads", 96) num_kv_heads = text_config.get("num_key_value_heads", 8) llm_attn_query_size = text_config.get("llm_attn_query_size", 12288) head_dim = text_config.get("attention_dim", llm_attn_query_size // llm_num_heads) vision_num_layers = vision_config.get("depth", 24) vit_n_head = vision_config.get("num_heads", 12) print( f"Model parameters: num_layers={num_layers}, vision_num_layers={vision_num_layers}, " f"num_heads={llm_num_heads}, multi_query_group_num={num_kv_heads}, llm_attn_query_size={llm_attn_query_size}" ) print("Merging tensor parallel weights...") interleaved_qkv = True num_attention_heads = llm_num_heads multi_query_group_num = num_kv_heads attention_dim = head_dim complete_state_dict = {} # LLM layer_i = 0 while f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights: if f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in 
full_weights: complete_state_dict[f"model.language_model.layers.{layer_i}.input_layernorm.weight"] = full_weights[ f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" ] if f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight" in full_weights: complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = ( full_weights[f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight"] ) elif f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight" in full_weights: complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = ( full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight"] ) q, k, v = merge_qkv( sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.weight"], original_tp=origin_tp, num_attention_heads=num_attention_heads, multi_query_group_num=multi_query_group_num, attention_dim=attention_dim, interleaved_qkv=interleaved_qkv, ) complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.weight"] = q.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.weight"] = k.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.weight"] = v.clone() if f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias" in full_weights: q_bias, k_bias, v_bias = merge_qkv( sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias"], original_tp=origin_tp, num_attention_heads=num_attention_heads, multi_query_group_num=multi_query_group_num, attention_dim=attention_dim, interleaved_qkv=interleaved_qkv, ) complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.bias"] = q_bias.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.bias"] = k_bias.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.bias"] = v_bias.clone() o_proj = torch.cat(full_weights[f"decoder.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1) complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.o_proj.weight"] = o_proj.clone() if f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc1.weight" in full_weights: routed_expert_fc1_weights = find_expert_weight(full_weights, layer_i, fc1=True) for idx, weight in enumerate(routed_expert_fc1_weights): gate_proj_weight, up_proj_weight = merge_glu_vit([weight]) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.gate_proj.weight"] = ( gate_proj_weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.up_proj.weight"] = ( up_proj_weight.clone() ) routed_expert_fc2_weights = find_expert_weight(full_weights, layer_i, fc1=False) for idx, weight in enumerate(routed_expert_fc2_weights): complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.down_proj.weight"] = ( weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate.e_score_correction_bias"] = ( full_weights[f"decoder.layers.{layer_i}.mlp.router.expert_bias"] ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate.weight"] = full_weights[ f"decoder.layers.{layer_i}.mlp.router.weight" ] gate_proj_weight, up_proj_weight = merge_glu_vit( full_weights[f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc1.weight"] ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.gate_proj.weight"] = ( gate_proj_weight.clone() ) 
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.up_proj.weight"] = ( up_proj_weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.down_proj.weight"] = ( full_weights[f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc2.weight"] ) else: # MLP - Use gate_up_proj gate_proj_weight, up_proj_weight = merge_glu_vit( full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.weight"] ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate_proj.weight"] = ( gate_proj_weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.up_proj.weight"] = up_proj_weight.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1 ) layer_i += 1 # Embedd Model, LM Head, and Norm embed_tokens = torch.cat(full_weights["embedding.word_embeddings.weight"], dim=0) complete_state_dict["model.language_model.embed_tokens.weight"] = embed_tokens.clone() lm_head = torch.cat(full_weights["output_layer.weight"], dim=0) complete_state_dict["lm_head.weight"] = lm_head.clone() complete_state_dict["model.language_model.norm.weight"] = full_weights["decoder.final_layernorm.weight"].clone() # VLM for layer_i in range(vision_num_layers): complete_state_dict[f"model.visual.blocks.{layer_i}.norm1.weight"] = full_weights[ f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" ] complete_state_dict[f"model.visual.blocks.{layer_i}.norm2.weight"] = full_weights[ f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight" ] q, k, v = merge_qkv( sd_list=full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.weight"], original_tp=origin_tp, num_attention_heads=vit_n_head, multi_query_group_num=vit_n_head, attention_dim=attention_dim, interleaved_qkv=interleaved_qkv, ) complete_state_dict[f"model.visual.blocks.{layer_i}.attn.qkv.weight"] = torch.cat((q, k, v), dim=0) proj_weight = torch.cat( full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1 ) complete_state_dict[f"model.visual.blocks.{layer_i}.attn.proj.weight"] = proj_weight.clone() gate_proj_weight, up_proj_weight = merge_glu_vit( full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.weight"] ) complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.gate_proj.weight"] = gate_proj_weight.clone() complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.up_proj.weight"] = up_proj_weight.clone() down_proj_weight = torch.cat( full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1 ) complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.down_proj.weight"] = down_proj_weight.clone() complete_state_dict["model.visual.downsample.weight"] = ( full_weights["vision_model.downsample.weight"].clone().contiguous() ) complete_state_dict["model.visual.downsample.bias"] = ( full_weights["vision_model.downsample.bias"].clone().contiguous() ) # Merger gate_proj, up_proj = merge_glu_vit(full_weights["vision_projection.encoder.linear_fc1.weight"]) down_proj = torch.cat(full_weights["vision_projection.encoder.linear_fc2.weight"], dim=1) proj = torch.cat(full_weights["vision_projection.linear_fc_extra.weight"], dim=0) complete_state_dict["model.visual.merger.gate_proj.weight"] = gate_proj.clone().contiguous() complete_state_dict["model.visual.merger.up_proj.weight"] = up_proj.clone().contiguous() 
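    # Note: `merge_glu_vit` above assumes Megatron's `linear_fc1` stores the gate and up
    # projections stacked along dim 0, so `chunk(2, dim=0)` recovers `gate_proj` / `up_proj`
    # for each tensor-parallel shard; the remaining merger weights are plain concatenations
    # across TP ranks (`linear_fc2` along dim 1, `linear_fc_extra` along dim 0).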
complete_state_dict["model.visual.merger.down_proj.weight"] = down_proj.clone().contiguous() complete_state_dict["model.visual.merger.proj.weight"] = proj.clone().contiguous() if "vision_projection.layer_norm.weight" in full_weights: complete_state_dict["model.visual.merger.post_projection_norm.weight"] = full_weights[ "vision_projection.layer_norm.weight" ] if "vision_projection.layer_norm.bias" in full_weights: complete_state_dict["model.visual.merger.post_projection_norm.bias"] = full_weights[ "vision_projection.layer_norm.bias" ] complete_state_dict["model.visual.embeddings.position_embedding.weight"] = ( full_weights["vision_model.position_embeddings.weight"].clone().contiguous() ) complete_state_dict["model.visual.patch_embed.proj.weight"] = ( full_weights["vision_model.conv3d.weight"].clone().contiguous() ) complete_state_dict["model.visual.patch_embed.proj.bias"] = ( full_weights["vision_model.conv3d.bias"].clone().contiguous() ) # Check for additional vision model norm layers mentioned in the expected output if "vision_model.post_conv_layernorm.weight" in full_weights: complete_state_dict["model.visual.post_conv_layernorm.weight"] = ( full_weights["vision_model.post_conv_layernorm.weight"].clone().contiguous() ) if "vision_model.post_layernorm.weight" in full_weights: complete_state_dict["model.visual.post_layernorm.weight"] = ( full_weights["vision_model.post_layernorm.weight"].clone().contiguous() ) print(f"Total keys in state dict: {len(complete_state_dict)}") print("bias use Float32") save_sharded_model( complete_state_dict, output_path=output_path, max_shard_size_gb=5, num_layers=num_layers, vision_num_layers=vision_num_layers, ) hf_config = { "architectures": ["Glm4vMoeForConditionalGeneration"], "model_type": "glm4v_moe", "image_start_token_id": model_config.get("image_start_token_id", 151339), "image_end_token_id": model_config.get("image_end_token_id", 151340), "video_start_token_id": model_config.get("video_start_token_id", 151341), "video_end_token_id": model_config.get("video_end_token_id", 151342), "transformers_version": "4.57.0.dev0", } txt_config = { "model_type": "glm4v_moe_text", "attention_bias": model_config.get("add_qkv_bias", True), "use_qk_norm": model_config.get("use_qk_norm", False), "attention_dropout": 0.0, "pad_token_id": model_config.get("pad_token_id", 151329), "eos_token_id": model_config.get("eos_token_id", [151329, 151336, 151338]), "image_token_id": model_config.get("image_token_id", 151363), "video_token_id": model_config.get("video_token_id", 151364), "hidden_act": text_config.get("hidden_act", "silu"), "hidden_size": text_config.get("hidden_size", 4096), "initializer_range": 0.02, "intermediate_size": text_config.get("intermediate_size", 10944), "max_position_embeddings": text_config.get("seq_length", 131072), "num_attention_heads": text_config.get("num_attention_heads", 96), "num_hidden_layers": text_config.get("num_layers", 46), "num_key_value_heads": text_config.get("multi_query_group_num", 2), "rms_norm_eps": text_config.get("layernorm_epsilon", 1e-05), "dtype": text_config.get("torch_dtype", "bfloat16"), "use_cache": text_config.get("use_cache", True), "vocab_size": text_config.get("vocab_size", 151424), "partial_rotary_factor": 0.5, "tie_word_embeddings": False, "moe_intermediate_size": text_config.get("moe_intermediate_size", 1408), "n_group": text_config.get("n_group", 1), "n_routed_experts": text_config.get("n_routed_experts", 128), "n_shared_experts": text_config.get("n_shared_experts", 1), "norm_topk_prob": 
text_config.get("norm_topk_prob", True), "num_experts_per_tok": text_config.get("num_experts_per_tok", 8), "rope_parameters": { "rope_type": "default", "rope_theta": 10000.0, "mrope_section": [8, 12, 12], "partial_rotary_factor": 0.5, }, } hf_config["text_config"] = txt_config if "vision_config" in model_config: vision_config = { "model_type": "glm4v_moe_vision", "hidden_size": model_config["vision_config"].get("hidden_size", 1536), "depth": model_config["vision_config"].get("num_layers", 24), "num_heads": model_config["vision_config"].get("num_attention_heads", 12), "attention_bias": model_config["vision_config"].get("attention_bias", False), "intermediate_size": model_config.get("ffn_hidden_size", 13696), "hidden_act": model_config["vision_config"].get("hidden_act", "silu"), "hidden_dropout_prob": model_config["vision_config"].get("hidden_dropout_prob", 0.0), "initializer_range": 0.02, "image_size": model_config["vision_config"].get("image_size", 336), "patch_size": model_config["vision_config"].get("patch_size", 14), "out_hidden_size": model_config.get("hidden_size", 4096), "rms_norm_eps": model_config["vision_config"].get("layernorm_epsilon", 1e-05), "spatial_merge_size": model_config["vision_config"].get("downsample_ratio", 2), "temporal_patch_size": model_config["vision_config"].get("t_patch", 2), } hf_config["vision_config"] = vision_config config_path = os.path.join(output_path, "config.json") with open(config_path, "w") as f: json.dump(hf_config, f, indent=2) print(f"Conversion complete! Model saved to {output_path}") def parse_args(): parser = argparse.ArgumentParser(description="Convert Megatron model to HuggingFace format") parser.add_argument(
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
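The conversion loop in the (truncated) script above rebuilds per-head attention projections from Megatron's fused `linear_qkv` tensors via `merge_qkv`, whose definition is not included in this excerpt. As a rough illustration only — assuming an unsharded, non-interleaved layout in which all query rows come first, followed by the key and then value rows — splitting such a fused weight for grouped-query attention might look like the sketch below; `split_fused_qkv` and the toy shapes are illustrative, not the script's actual helper.

```python
import torch


def split_fused_qkv(qkv: torch.Tensor, num_q_heads: int, num_kv_heads: int, head_dim: int):
    """Split a fused [q|k|v] projection into separate q/k/v weights (GQA layout).

    Assumes query rows come first, then key rows, then value rows, with no
    tensor-parallel sharding or interleaving; real checkpoints may differ.
    """
    q_rows = num_q_heads * head_dim
    kv_rows = num_kv_heads * head_dim
    q, k, v = torch.split(qkv, [q_rows, kv_rows, kv_rows], dim=0)
    return q, k, v


# Toy shapes loosely following the defaults above (96 query heads, 2 KV groups, assumed head_dim 128).
hidden_size = 4096
fused = torch.randn((96 + 2 * 2) * 128, hidden_size)
q, k, v = split_fused_qkv(fused, num_q_heads=96, num_kv_heads=2, head_dim=128)
print(q.shape, k.shape, v.shape)  # (12288, 4096), (256, 4096), (256, 4096)
```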
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/glm4v_moe/modular_glm4v_moe.py
src/transformers/models/glm4v_moe/modular_glm4v_moe.py
# coding=utf-8 # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable from typing import Optional, Union import torch import torch.nn as nn from ... import initialization as init from ...cache_utils import Cache, DynamicCache from ...configuration_utils import PreTrainedConfig from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import MoeModelOutputWithPast from ...modeling_rope_utils import RopeParameters, RotaryEmbeddingConfigMixin from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging from ...utils.generic import OutputRecorder, check_model_inputs from ..deepseek_v3.modeling_deepseek_v3 import DeepseekV3NaiveMoe from ..glm4.modeling_glm4 import Glm4Attention from ..glm4_moe.configuration_glm4_moe import Glm4MoeConfig from ..glm4_moe.modeling_glm4_moe import ( Glm4MoeDecoderLayer, Glm4MoeMLP, Glm4MoeMoE, Glm4MoePreTrainedModel, Glm4MoeRMSNorm, Glm4MoeTopkRouter, eager_attention_forward, ) from ..glm4v.configuration_glm4v import Glm4vConfig, Glm4vVisionConfig from ..glm4v.modeling_glm4v import ( Glm4vForConditionalGeneration, Glm4vTextModel, Glm4vTextRotaryEmbedding, Glm4vVisionModel, Glm4vVisionRotaryEmbedding, rotate_half, ) from ..qwen3_vl_moe.modeling_qwen3_vl_moe import ( Qwen3VLMoeCausalLMOutputWithPast, Qwen3VLMoeModelOutputWithPast, load_balancing_loss_func, ) logger = logging.get_logger(__name__) class Glm4vMoeVisionConfig(Glm4vVisionConfig): pass class Glm4vMoeRMSNorm(Glm4MoeRMSNorm): pass class Glm4vMoeTextConfig(Glm4MoeConfig, RotaryEmbeddingConfigMixin): r""" This is the configuration class to store the configuration of a [`Glm4vMoeModel`]. It is used to instantiate a GLM-4.5V model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of GLM-4.5V [zai-org/GLM-4.5V](https://huggingface.co/zai-org/GLM-4.5V). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151424): Vocabulary size of the Glm4vMoe model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Glm4vMoeModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 10944): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 46): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 96): Number of attention heads for each attention layer in the Transformer encoder. 
num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to `8`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 65536): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, *optional*, defaults to `True`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. moe_intermediate_size (`int`, *optional*, defaults to 1408): Intermediate size of the routed experts. num_experts_per_tok (`int`, *optional*, defaults to 8): Number of experts per token. n_shared_experts (`int`, *optional*, defaults to 1): Number of shared experts. n_routed_experts (`int`, *optional*, defaults to 128): Number of routed experts. routed_scaling_factor (`float`, *optional*, defaults to 1.0): Scaling factor for routed experts. n_group (`int`, *optional*, defaults to 1): Number of groups for routed experts. topk_group (`int`, *optional*, defaults to 1): Number of selected groups for each token (ensuring that the selected experts are only within `topk_group` groups). first_k_dense_replace (`int`, *optional*, defaults to 1): Number of dense layers in the shallow layers (embed->dense->dense->...->dense->moe->moe->...->lm_head), i.e. the first `first_k_dense_replace` layers use a dense MLP instead of MoE. norm_topk_prob (`bool`, *optional*, defaults to `True`): Whether to normalize the topk probabilities. router_aux_loss_coef (`float`, *optional*, defaults to 0.0001): The aux loss factor for the loss. 
```python >>> from transformers import Glm4vMoeTextModel, Glm4vMoeConfig >>> # Initializing a GLM-4.5V style configuration >>> configuration = Glm4vMoeConfig() >>> # Initializing a model from the GLM-4.5V style configuration >>> model = Glm4vMoeTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "glm4v_moe_text" base_config_key = "text_config" keys_to_ignore_at_inference = ["past_key_values"] # Default tensor parallel plan for base model `Glm4vMoe` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: Optional[int] = 151424, hidden_size: Optional[int] = 4096, intermediate_size: Optional[int] = 10944, num_hidden_layers: Optional[int] = 46, num_attention_heads: Optional[int] = 96, num_key_value_heads: Optional[int] = 8, hidden_act: Optional[str] = "silu", max_position_embeddings: Optional[int] = 65536, initializer_range: Optional[float] = 0.02, rms_norm_eps: Optional[int] = 1e-5, use_cache: Optional[bool] = True, tie_word_embeddings: Optional[bool] = False, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, attention_bias: Optional[bool] = True, attention_dropout: Optional[float] = 0.0, moe_intermediate_size: Optional[int] = 1408, num_experts_per_tok: Optional[int] = 8, n_shared_experts: Optional[int] = 1, n_routed_experts: Optional[int] = 128, routed_scaling_factor: Optional[float] = 1.0, n_group: Optional[int] = 1, topk_group: Optional[int] = 1, first_k_dense_replace: Optional[int] = 1, norm_topk_prob: Optional[bool] = True, router_aux_loss_coef: Optional[float] = 0.0001, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.rope_parameters = rope_parameters kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC # MoE arguments self.moe_intermediate_size = moe_intermediate_size self.num_experts_per_tok = num_experts_per_tok self.n_group = n_group self.topk_group = topk_group self.n_shared_experts = n_shared_experts self.n_routed_experts = n_routed_experts self.routed_scaling_factor = routed_scaling_factor self.first_k_dense_replace = first_k_dense_replace self.norm_topk_prob = norm_topk_prob self.router_aux_loss_coef = router_aux_loss_coef PreTrainedConfig.__init__( self, tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs ) class Glm4vMoeConfig(Glm4vConfig): r""" This is the configuration class to store the configuration of a [`Glm4vMoeModel`]. It is used to instantiate a GLM-4.5V model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of GLM-4.5V [zai-org/GLM-4.5V](https://huggingface.co/zai-org/GLM-4.5V). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vMoeTextConfig`): The config object or dictionary of the text backbone. vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vMoeVisionConfig`): The config object or dictionary of the vision backbone. image_token_id (`int`, *optional*, defaults to 151363): The image token index to encode the image prompt. video_token_id (`int`, *optional*, defaults to 151364): The video token index to encode the image prompt. image_start_token_id (`int`, *optional*, defaults to 151339): The image start token index to encode the start of image. image_end_token_id (`int`, *optional*, defaults to 151340): The image end token index to encode the end of image. video_start_token_id (`int`, *optional*, defaults to 151341): The video start token index to encode the start of video. video_end_token_id (`int`, *optional*, defaults to 151342): The video end token index to encode the end of video. ```python >>> from transformers import Glm4vMoeForConditionalGeneration, Glm4vMoeConfig >>> # Initializing a GLM-4.5V style configuration >>> configuration = Glm4vMoeConfig() >>> # Initializing a model from the GLM-4.5V style configuration >>> model = Glm4vMoeForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" def __init__( self, text_config=None, vision_config=None, image_token_id=151363, video_token_id=151364, image_start_token_id=151339, image_end_token_id=151340, video_start_token_id=151341, video_end_token_id=151342, **kwargs, ): super().__init__() class Glm4vMoeModelOutputWithPast(Qwen3VLMoeModelOutputWithPast): pass def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1): """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/). Explanation: Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately. Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding. For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal, height and width) of text embedding is always the same, so the text embedding rotary position embedding has no difference with modern LLMs. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. mrope_section(`List(int)`): Multimodal rope section is for channel dimension of temporal, height and width in rope calculation. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. 
For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ mrope_section = mrope_section * 2 cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) # Keep half or full tensor for later concatenation rotary_dim = cos.shape[-1] q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] # Apply rotary embeddings on the first half or full tensor q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin) k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin) # Concatenate back to full shape q_embed = torch.cat([q_embed, q_pass], dim=-1) k_embed = torch.cat([k_embed, k_pass], dim=-1) return q_embed, k_embed class Glm4vMoeTextRotaryEmbedding(Glm4vTextRotaryEmbedding): def __init__(self, config: Glm4vMoeTextConfig, device=None, layer_type=None): super().__init__(config, device=device, layer_type=layer_type) @staticmethod def compute_default_rope_parameters( config: Optional[Glm4vMoeTextConfig] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). 
""" base = config.rope_parameters["rope_theta"] partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0) head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads dim = int(head_dim * partial_rotary_factor) attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor class Glm4vMoeTextAttention(Glm4Attention): def __init__(self, config: Glm4vMoeTextConfig, layer_idx: Optional[int] = None): super().__init__(config, layer_idx) self.rope_parameters = config.rope_parameters def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape) key_states = self.k_proj(hidden_states).view(hidden_shape) value_states = self.v_proj(hidden_states).view(hidden_shape) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_multimodal_rotary_pos_emb( # diff with Llama query_states, key_states, cos, sin, self.rope_parameters["mrope_section"] ) if past_key_values is not None: # sin and cos are specific to RoPE models; position_ids needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Glm4vMoeTextTopkRouter(Glm4MoeTopkRouter, nn.Module): def __init__(self, config: Glm4vMoeTextConfig): super().__init__(config) class Glm4vMoeTextNaiveMoe(DeepseekV3NaiveMoe): pass class Glm4vMoeTextMoE(Glm4MoeMoE): def __init__(self, config: Glm4vMoeTextConfig): super().__init__(config) self.config = config self.experts = Glm4vMoeTextNaiveMoe(config) self.gate = Glm4vMoeTextTopkRouter(config) self.shared_experts = Glm4vMoeTextMLP( config=config, intermediate_size=config.moe_intermediate_size * config.n_shared_experts ) class Glm4vMoeTextMLP(Glm4MoeMLP): pass class Glm4vMoeTextDecoderLayer(Glm4MoeDecoderLayer): def __init__(self, config: Glm4vMoeTextConfig, layer_idx: int): super().__init__(config, layer_idx) class Glm4vMoePreTrainedModel(Glm4MoePreTrainedModel): config: Glm4vMoeConfig base_model_prefix = "model" input_modalities = ("text", "image", "video") _no_split_modules = ["Glm4vMoeTextDecoderLayer", "Glm4vMoeVisionBlock"] _skip_keys_device_placement = "past_key_values" _can_record_outputs = { "hidden_states": Glm4vMoeTextDecoderLayer, "attentions": 
Glm4vMoeTextAttention, "router_logits": OutputRecorder(nn.Linear, layer_name="mlp.gate", index=0), } def _init_weights(self, module): super()._init_weights(module) if isinstance(module, Glm4vMoeVisionRotaryEmbedding): inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim)) init.copy_(module.inv_freq, inv_freq) class Glm4vMoeCausalLMOutputWithPast(Qwen3VLMoeCausalLMOutputWithPast): pass class Glm4vMoeVisionRotaryEmbedding(Glm4vVisionRotaryEmbedding): pass @auto_docstring class Glm4vMoeVisionModel(Glm4vVisionModel): pass @auto_docstring class Glm4vMoeTextModel(Glm4vTextModel): def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") # torch.jit.trace() doesn't support cache objects in the output if use_cache and past_key_values is None and not torch.jit.is_tracing(): past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) # the hard coded `3` is for temporal, height and width. if position_ids is None: position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1) elif position_ids.ndim == 2: position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) # NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions # where each dim indicates visual spatial positions for temporal/height/width grids. # There are two scenarios when FA2-like packed masking might be activated. # 1. User specifically passed packed `position_ids` and no attention mask. # In this case we expect the useer to create correct position ids for all 3 grids # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len] # 2. User runs forward with no attention mask and no position ids. In this case, position ids # are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are # prepended by us when creating positions so that the mask is constructed correctly. 
NOTE: failing to pass # text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation` if position_ids.ndim == 3 and position_ids.shape[0] == 4: text_position_ids = position_ids[0] position_ids = position_ids[1:] else: # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids text_position_ids = None mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": text_position_ids, } # Create the masks causal_mask = create_causal_mask(**mask_kwargs) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for i, decoder_layer in enumerate(self.layers[: self.config.num_hidden_layers]): layer_outputs = decoder_layer( hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) hidden_states = layer_outputs hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) class Glm4vMoeForConditionalGeneration(Glm4vForConditionalGeneration): @auto_docstring @check_model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Glm4vMoeCausalLMOutputWithPast]: outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size) aux_loss = None if kwargs.get("output_router_logits", False): aux_loss = load_balancing_loss_func( outputs.router_logits, self.num_experts, self.num_experts_per_tok, attention_mask, ) if labels is not None: loss += self.config.text_config.router_aux_loss_coef * aux_loss.to( loss.device ) # make sure to reside in the same device return Glm4vMoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=outputs.rope_deltas, ) __all__ = [ "Glm4vMoeConfig", "Glm4vMoeTextConfig", "Glm4vMoeVisionConfig", "Glm4vMoeForConditionalGeneration", "Glm4vMoeModel", # noqa: F822 
"Glm4vMoePreTrainedModel", "Glm4vMoeTextModel", "Glm4vMoeVisionModel", ]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
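`apply_multimodal_rotary_pos_emb` in the file above splits the rotary channels into temporal/height/width sections (`mrope_section`) and, for each chunk, selects the matching position grid before applying the usual rotate-half formula. A minimal sketch of just that chunk-selection step, with made-up shapes (an assumed head_dim of 128 with partial_rotary_factor 0.5 gives 64 rotary channels, and the default `mrope_section` is [8, 12, 12]):

```python
import torch

# Made-up shapes: 64 rotary channels, mrope_section [8, 12, 12] (temporal, height, width).
mrope_section = [8, 12, 12]
cos = torch.randn(3, 1, 10, 64)  # [t/h/w grid, batch, seq_len, rotary_dim]

# Chunk-selection step mirrored from `apply_multimodal_rotary_pos_emb`: channel
# chunk i is taken from grid i % 3, so temporal, height and width positions each
# own their own slice of the rotary channels. The section list is doubled because
# the cached cos/sin tensors repeat the frequency channels once (rotate-half layout).
sections = mrope_section * 2  # [8, 12, 12, 8, 12, 12] -> sums to 64
merged_cos = torch.cat(
    [chunk[i % 3] for i, chunk in enumerate(cos.split(sections, dim=-1))], dim=-1
)
print(merged_cos.shape)  # torch.Size([1, 10, 64])
```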
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert/tokenization_bert.py
src/transformers/models/bert/tokenization_bert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Bert.""" import collections from typing import Optional, Union from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors from tokenizers.models import WordPiece from ...tokenization_utils_tokenizers import TokenizersBackend from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab class BertTokenizer(TokenizersBackend): r""" Construct a BERT tokenizer (backed by HuggingFace's tokenizers library). Based on WordPiece. This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab (`str` or `dict[str, int]`, *optional*): Custom vocabulary dictionary. If not provided, vocabulary is loaded from `vocab_file`. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "token_type_ids", "attention_mask"] model = WordPiece def __init__( self, vocab: Optional[Union[str, dict[str, int]]] = None, do_lower_case: bool = False, unk_token: str = "[UNK]", sep_token: str = "[SEP]", pad_token: str = "[PAD]", cls_token: str = "[CLS]", mask_token: str = "[MASK]", tokenize_chinese_chars: bool = True, strip_accents: Optional[bool] = None, **kwargs, ): self.do_lower_case = do_lower_case self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents if vocab is None: vocab = { str(pad_token): 0, str(unk_token): 1, str(cls_token): 2, str(sep_token): 3, str(mask_token): 4, } self._vocab = vocab self._tokenizer = Tokenizer(WordPiece(self._vocab, unk_token=str(unk_token))) self._tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) self._tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() self._tokenizer.decoder = decoders.WordPiece(prefix="##") super().__init__( do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) cls_token_id = self.cls_token_id if self.cls_token_id is not None else 2 sep_token_id = self.sep_token_id if self.sep_token_id is not None else 3 self._tokenizer.post_processor = processors.TemplateProcessing( single=f"{str(self.cls_token)}:0 $A:0 {str(self.sep_token)}:0", pair=f"{str(self.cls_token)}:0 $A:0 {str(self.sep_token)}:0 $B:1 {str(self.sep_token)}:1", special_tokens=[ (str(self.cls_token), cls_token_id), (str(self.sep_token), sep_token_id), ], ) __all__ = ["BertTokenizer"] BertTokenizerFast = BertTokenizer
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
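Because the `BertTokenizer` above simply wires a WordPiece model, `BertNormalizer`, `BertPreTokenizer`, a WordPiece decoder and a `TemplateProcessing` post-processor into a `tokenizers.Tokenizer`, the same pipeline can be sketched with the `tokenizers` library directly. A toy example with a made-up vocabulary (real checkpoints load the full `vocab.txt` / `tokenizer.json` instead, and `do_lower_case` defaults to `False` in the class above; lowercasing is enabled here just for the demo):

```python
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import WordPiece

# Tiny made-up vocabulary; real checkpoints load the full vocab.txt / tokenizer.json.
vocab = {"[PAD]": 0, "[UNK]": 1, "[CLS]": 2, "[SEP]": 3, "[MASK]": 4, "hello": 5, "world": 6, "!": 7}

tok = Tokenizer(WordPiece(vocab, unk_token="[UNK]"))
tok.normalizer = normalizers.BertNormalizer(lowercase=True)   # lowercasing enabled for this demo
tok.pre_tokenizer = pre_tokenizers.BertPreTokenizer()         # whitespace + punctuation splitting
tok.decoder = decoders.WordPiece(prefix="##")
tok.post_processor = processors.TemplateProcessing(           # adds [CLS] ... [SEP]
    single="[CLS]:0 $A:0 [SEP]:0",
    pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 2), ("[SEP]", 3)],
)

enc = tok.encode("Hello world!")
print(enc.tokens)  # ['[CLS]', 'hello', 'world', '!', '[SEP]']
print(enc.ids)     # [2, 5, 6, 7, 3]
```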
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert/modeling_bert.py
src/transformers/models/bert/modeling_bert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" import warnings from collections.abc import Callable from dataclasses import dataclass from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...masking_utils import create_bidirectional_mask, create_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import apply_chunking_to_forward from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_bert import BertConfig logger = logging.get_logger(__name__) class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered 
buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): # NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0]) buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1) buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids) token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length) else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): if scaling is None: scaling = query.size(-1) ** -0.5 # Take the dot product between "query" and "key" to get the raw attention scores. attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attention_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class BertSelfAttention(nn.Module): def __init__(self, config, is_causal=False, layer_idx=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.scaling = self.attention_head_size**-0.5 self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.is_causal = is_causal self.layer_idx = layer_idx def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.attention_head_size) # get all proj query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2) key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2) if past_key_values is not None: # decoder-only bert can have a simple dynamic cache for example current_past_key_values = 
past_key_values if isinstance(past_key_values, EncoderDecoderCache): current_past_key_values = past_key_values.self_attention_cache # save all key/value_layer to cache to be re-used for fast auto-regressive generation key_layer, value_layer = current_past_key_values.update( key_layer, value_layer, self.layer_idx, {"cache_position": cache_position}, ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() return attn_output, attn_weights class BertCrossAttention(nn.Module): def __init__(self, config, is_causal=False, layer_idx=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.scaling = self.attention_head_size**-0.5 self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_causal = is_causal self.layer_idx = layer_idx def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[EncoderDecoderCache] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = encoder_hidden_states.shape[1] q_input_shape = (bsz, tgt_len, -1, self.attention_head_size) kv_input_shape = (bsz, src_len, -1, self.attention_head_size) # get query proj query_layer = self.query(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False if past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].keys value_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].values else: key_layer = self.key(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2) value_layer = self.value(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: # save all states to the cache key_layer, value_layer = past_key_values.cross_attention_cache.update( key_layer, value_layer, self.layer_idx ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else 
self.dropout.p, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() return attn_output, attn_weights class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False): super().__init__() self.is_cross_attention = is_cross_attention attention_class = BertCrossAttention if is_cross_attention else BertSelfAttention self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx) self.output = BertSelfOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask attention_output, attn_weights = self.self( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) attention_output = self.output(attention_output, hidden_states) return attention_output, attn_weights class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BertAttention( config, is_causal=False, layer_idx=layer_idx, is_cross_attention=True, ) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def 
forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: self_attention_output, _ = self.attention( hidden_states, attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) attention_output = self_attention_output if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) cross_attention_output, _ = self.crossattention( self_attention_output, None, # attention_mask encoder_hidden_states, encoder_attention_mask, past_key_values=past_key_values, **kwargs, ) attention_output = cross_attention_output layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BertLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: for i, layer_module in enumerate(self.layer): hidden_states = layer_module( hidden_states, attention_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score @auto_docstring class BertPreTrainedModel(PreTrainedModel): config_class = BertConfig base_model_prefix = "bert" supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": BertLayer, "attentions": BertSelfAttention, "cross_attentions": BertCrossAttention, } @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" super()._init_weights(module) if isinstance(module, BertLMPredictionHead): init.zeros_(module.bias) elif isinstance(module, BertEmbeddings): init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1))) init.zeros_(module.token_type_ids) @dataclass @auto_docstring( custom_intro=""" Output type of [`BertForPreTraining`]. """ ) class BertForPreTrainingOutput(ModelOutput): r""" loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). """ loss: Optional[torch.FloatTensor] = None prediction_logits: Optional[torch.FloatTensor] = None seq_relationship_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @auto_docstring( custom_intro=""" The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ ) class BertModel(BertPreTrainedModel): _no_split_modules = ["BertEmbeddings", "BertLayer"] def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.gradient_checkpointing = False self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @check_model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if use_cache and past_key_values is None: past_key_values = ( EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None or self.config.is_encoder_decoder else DynamicCache(config=self.config) ) if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if input_ids is not None: device = input_ids.device seq_length = input_ids.shape[1] else: device = inputs_embeds.device seq_length = inputs_embeds.shape[1] past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, 
token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) attention_mask, encoder_attention_mask = self._create_attention_masks( attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, embedding_output=embedding_output, encoder_hidden_states=encoder_hidden_states, cache_position=cache_position, past_key_values=past_key_values, ) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_ids=position_ids, **kwargs, ) sequence_output = encoder_outputs.last_hidden_state pooled_output = self.pooler(sequence_output) if self.pooler is not None else None return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, ) def _create_attention_masks( self, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values, ): if self.config.is_decoder: attention_mask = create_causal_mask( config=self.config, input_embeds=embedding_output, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, ) else: attention_mask = create_bidirectional_mask( config=self.config, input_embeds=embedding_output, attention_mask=attention_mask, ) if encoder_attention_mask is not None: encoder_attention_mask = create_bidirectional_mask( config=self.config, input_embeds=embedding_output, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, ) return attention_mask, encoder_attention_mask @auto_docstring( custom_intro=""" Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next sentence prediction (classification)` head. """ ) class BertForPreTraining(BertPreTrainedModel): _tied_weights_keys = { "cls.predictions.decoder.weight": "bert.embeddings.word_embeddings.weight", "cls.predictions.decoder.bias": "cls.predictions.bias", } def __init__(self, config): super().__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, next_sentence_label: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], BertForPreTrainingOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
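The `BertPooler` in the file above builds a sentence-level vector by passing only the first ([CLS]) token's hidden state through a dense layer and a tanh. A minimal, self-contained sketch of that pooling step, with toy tensor sizes chosen here purely for illustration:

```python
import torch
import torch.nn as nn

class ToyPooler(nn.Module):
    """CLS-token pooling as in BertPooler: dense + tanh over the first token only."""

    def __init__(self, hidden_size: int):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        first_token = hidden_states[:, 0]          # (batch, hidden) - the [CLS] position
        return self.activation(self.dense(first_token))

hidden_states = torch.randn(2, 7, 16)              # (batch, seq_len, hidden_size), toy sizes
print(ToyPooler(16)(hidden_states).shape)          # torch.Size([2, 16])
```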
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert/tokenization_bert_legacy.py
src/transformers/models/bert/tokenization_bert_legacy.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Bert.""" import collections import os import unicodedata from typing import Optional from ...tokenization_python import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizerLegacy(PreTrainedTokenizer): r""" Construct a BERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). 
strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. """ vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs, ): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize( text, never_split=self.all_special_tokens if not split_special_tokens else None ): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. 
Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. 
""" def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F) or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) ): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer: """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens __all__ = ["BasicTokenizer", "BertTokenizerLegacy", "WordpieceTokenizer"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
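`WordpieceTokenizer.tokenize` in the file above uses greedy longest-match-first segmentation against the vocabulary, prefixing continuation pieces with `##` and falling back to the unknown token when no piece matches. A self-contained sketch of that algorithm with a hypothetical toy vocabulary (not a real BERT vocab file):

```python
def wordpiece(token: str, vocab: set, unk: str = "[UNK]", max_chars: int = 100) -> list:
    """Greedy longest-match-first WordPiece segmentation, mirroring the logic above."""
    if len(token) > max_chars:
        return [unk]
    pieces, start = [], 0
    while start < len(token):
        end, cur = len(token), None
        while start < end:
            piece = token[start:end] if start == 0 else "##" + token[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]                     # no sub-piece matched: whole token is unknown
        pieces.append(cur)
        start = end
    return pieces

toy_vocab = {"un", "##aff", "##able"}        # hypothetical vocabulary
print(wordpiece("unaffable", toy_vocab))     # ['un', '##aff', '##able']
```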
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py
src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BERT checkpoint.""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): # Initialise PyTorch model config = BertConfig.from_json_file(bert_config_file) print(f"Building PyTorch model from configuration: {config}") model = BertForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_bert(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
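The script above is a thin CLI wrapper; the same conversion can be driven from Python. A hedged sketch of the equivalent calls, with placeholder paths standing in for a real TF 1.x checkpoint and its `bert_config.json`:

```python
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert

# Placeholder paths - substitute a real TF 1.x checkpoint and its matching config file.
config = BertConfig.from_json_file("bert_config.json")
model = BertForPreTraining(config)
load_tf_weights_in_bert(model, config, "bert_model.ckpt")
torch.save(model.state_dict(), "pytorch_model.bin")
```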
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py
src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script can be used to convert a head-less TF2.x Bert model to PyTorch, as published on the official (now deprecated) GitHub: https://github.com/tensorflow/models/tree/v2.3.0/official/nlp/bert TF2.x uses different variable names from the original BERT (TF 1.4) implementation. The script re-maps the TF2.x Bert weight names to the original names, so the model can be imported with Hugging Face Transformers. You may adapt this script to include classification/MLM/NSP/etc. heads. Note: This script only works with an older version of the TensorFlow models repository (<= v2.3.0). Models trained with newer versions are not compatible with this script. """ import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_tf2_weights_in_bert(model, tf_checkpoint_path, config): tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] layer_depth = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") name = full_name.split("/") if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"Skipping non-model layer {full_name}") continue if "optimizer" in full_name: logger.info(f"Skipping optimization layer {full_name}") continue if name[0] == "model": # ignore initial 'model' name = name[1:] # figure out how many levels deep the name is depth = 0 for _name in name: if _name.startswith("layer_with_weights"): depth += 1 else: break layer_depth.append(depth) # read data array = tf.train.load_variable(tf_path, full_name) names.append("/".join(name)) arrays.append(array) logger.info(f"Read a total of {len(arrays):,} layers") # Sanity check if len(set(layer_depth)) != 1: raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})") layer_depth = list(set(layer_depth))[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP" " heads."
) # convert layers logger.info("Converting weights...") for full_name, array in zip(names, arrays): name = full_name.split("/") pointer = model trace = [] for i, m_name in enumerate(name): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights"): layer_num = int(m_name.split("-")[-1]) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"]) pointer = getattr(pointer, "embeddings") pointer = getattr(pointer, "LayerNorm") elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4)]) pointer = getattr(pointer, "encoder") pointer = getattr(pointer, "layer") pointer = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"]) pointer = getattr(pointer, "pooler") pointer = getattr(pointer, "dense") elif m_name == "embeddings": trace.append("embeddings") pointer = getattr(pointer, "embeddings") if layer_num == 0: trace.append("word_embeddings") pointer = getattr(pointer, "word_embeddings") elif layer_num == 1: trace.append("position_embeddings") pointer = getattr(pointer, "position_embeddings") elif layer_num == 2: trace.append("token_type_embeddings") pointer = getattr(pointer, "token_type_embeddings") else: raise ValueError(f"Unknown embedding layer with name {full_name}") trace.append("weight") pointer = getattr(pointer, "weight") elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"]) pointer = getattr(pointer, "attention") pointer = getattr(pointer, "self") elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"]) pointer = getattr(pointer, "attention") pointer = getattr(pointer, "output") pointer = getattr(pointer, "LayerNorm") elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"]) pointer = getattr(pointer, "attention") pointer = getattr(pointer, "output") pointer = getattr(pointer, "dense") elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"]) pointer = getattr(pointer, "output") pointer = getattr(pointer, "dense") elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"]) pointer = getattr(pointer, "output") pointer = getattr(pointer, "LayerNorm") elif m_name == "_key_dense": # attention key trace.append("key") pointer = getattr(pointer, "key") elif m_name == "_query_dense": # attention query trace.append("query") pointer = getattr(pointer, "query") elif m_name == "_value_dense": # attention value trace.append("value") pointer = getattr(pointer, "value") elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"]) pointer = getattr(pointer, "intermediate") pointer = getattr(pointer, "dense") elif m_name == "_output_layer_norm": # output layer norm trace.append("output") pointer = getattr(pointer, "output") # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias") pointer = getattr(pointer, "bias") elif m_name in ["kernel", "gamma"]: trace.append("weight") pointer = getattr(pointer, "weight") else: logger.warning(f"Ignored {m_name}") # for certain layers reshape is necessary trace = ".".join(trace) if 
re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match( r"(\S+)\.attention\.output\.dense\.weight", trace ): array = array.reshape(pointer.data.shape) if "kernel" in full_name: array = array.transpose() if pointer.shape == array.shape: pointer.data = torch.from_numpy(array) else: raise ValueError( f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:" f" {array.shape}" ) logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}") return model def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path): # Instantiate model logger.info(f"Loading model based on config from {config_path}...") config = BertConfig.from_json_file(config_path) model = BertModel(config) # Load weights from checkpoint logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...") load_tf2_weights_in_bert(model, tf_checkpoint_path, config) # Save pytorch-model logger.info(f"Saving PyTorch model to {pytorch_dump_path}...") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model (must include filename).", ) args = parser.parse_args() convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
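The conversion loop above routes each TF2 variable by its `layer_with_weights-<n>` index: 0-2 are the word, position and token-type embedding tables, 3 is the embedding LayerNorm, 4 through `num_hidden_layers + 3` are encoder layers, and `num_hidden_layers + 4` is the pooler. A small illustrative sketch of just that index-to-module routing (not part of the script itself):

```python
def tf2_layer_target(layer_num: int, num_hidden_layers: int) -> str:
    """Mirror the layer_with_weights-<n> routing used by the converter above."""
    if layer_num <= 2:
        return ["embeddings.word_embeddings",
                "embeddings.position_embeddings",
                "embeddings.token_type_embeddings"][layer_num]
    if layer_num == 3:
        return "embeddings.LayerNorm"
    if 3 < layer_num < num_hidden_layers + 4:
        return f"encoder.layer.{layer_num - 4}"
    if layer_num == num_hidden_layers + 4:
        return "pooler.dense"
    raise ValueError(f"unexpected layer index {layer_num}")

print(tf2_layer_target(4, num_hidden_layers=12))   # encoder.layer.0
print(tf2_layer_target(16, num_hidden_layers=12))  # pooler.dense
```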
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert/__init__.py
src/transformers/models/bert/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_bert import * from .modeling_bert import * from .tokenization_bert import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
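As I understand the `_LazyModule` plumbing above, importing the package itself stays cheap and the heavy submodules are only pulled in when an attribute is first accessed; a short usage sketch (behaviour hedged, not an official contract):

```python
import transformers.models.bert as bert_pkg   # fast: the modeling code is not imported yet

# First attribute access resolves BertConfig from configuration_bert via the lazy module.
config = bert_pkg.BertConfig()
print(config.hidden_size)                      # 768 by default
```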
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py
src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script converts a lm-head checkpoint from the "Token Dropping" implementation into a PyTorch-compatible BERT model. The official implementation of "Token Dropping" can be found in the TensorFlow Models repository: https://github.com/tensorflow/models/tree/master/official/projects/token_dropping """ import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str): def get_masked_lm_array(name: str): full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE" array = tf.train.load_variable(tf_checkpoint_path, full_name) if "kernel" in name: array = array.transpose() return torch.from_numpy(array) def get_encoder_array(name: str): full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE" array = tf.train.load_variable(tf_checkpoint_path, full_name) if "kernel" in name: array = array.transpose() return torch.from_numpy(array) def get_encoder_layer_array(layer_index: int, name: str): full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE" array = tf.train.load_variable(tf_checkpoint_path, full_name) if "kernel" in name: array = array.transpose() return torch.from_numpy(array) def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape): full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE" array = tf.train.load_variable(tf_checkpoint_path, full_name) array = array.reshape(original_shape) if "kernel" in name: array = array.transpose() return torch.from_numpy(array) print(f"Loading model based on config from {config_path}...") config = BertConfig.from_json_file(config_path) model = BertForMaskedLM(config) # Layers for layer_index in range(0, config.num_hidden_layers): layer: BertLayer = model.bert.encoder.layer[layer_index] # Self-attention self_attn: BertSelfAttention = layer.attention.self self_attn.query.weight.data = get_encoder_attention_layer_array( layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape ) self_attn.query.bias.data = get_encoder_attention_layer_array( layer_index, "_query_dense/bias", self_attn.query.bias.data.shape ) self_attn.key.weight.data = get_encoder_attention_layer_array( layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape ) self_attn.key.bias.data = get_encoder_attention_layer_array( layer_index, "_key_dense/bias", self_attn.key.bias.data.shape ) self_attn.value.weight.data = get_encoder_attention_layer_array( layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape ) self_attn.value.bias.data = get_encoder_attention_layer_array( layer_index, "_value_dense/bias", 
self_attn.value.bias.data.shape ) # Self-attention Output self_output: BertSelfOutput = layer.attention.output self_output.dense.weight.data = get_encoder_attention_layer_array( layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape ) self_output.dense.bias.data = get_encoder_attention_layer_array( layer_index, "_output_dense/bias", self_output.dense.bias.data.shape ) self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma") self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta") # Intermediate intermediate: BertIntermediate = layer.intermediate intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel") intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias") # Output bert_output: BertOutput = layer.output bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel") bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias") bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma") bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta") # Embeddings model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings") model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings") model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma") model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta") # LM Head lm_head = model.cls.predictions.transform lm_head.dense.weight.data = get_masked_lm_array("dense/kernel") lm_head.dense.bias.data = get_masked_lm_array("dense/bias") lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma") lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta") model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table") # Pooling model.bert.pooler = BertPooler(config=config) model.bert.pooler.dense.weight.data: BertPooler = get_encoder_array("_pooler_layer/kernel") model.bert.pooler.dense.bias.data: BertPooler = get_encoder_array("_pooler_layer/bias") # Export final model model.save_pretrained(pytorch_dump_path) # Integration test - should load without any errors ;) new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path) print(new_model.eval()) print("Model conversion was done successfully!") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) args = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
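All of the `get_*_array` helpers above transpose arrays whose name contains `kernel` before copying them into PyTorch: TF dense kernels are stored as `(in_features, out_features)`, whereas `nn.Linear.weight` is `(out_features, in_features)`. A minimal sketch of that convention with illustrative shapes:

```python
import numpy as np
import torch
import torch.nn as nn

tf_kernel = np.random.rand(768, 3072).astype(np.float32)   # TF dense kernel: (in, out)
linear = nn.Linear(768, 3072)                               # PyTorch weight: (out, in)
linear.weight.data = torch.from_numpy(tf_kernel.transpose())
print(tuple(linear.weight.shape))                           # (3072, 768)
```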
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert/configuration_bert.py
src/transformers/models/bert/configuration_bert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class BertConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`BertModel`]. It is used to instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BERT [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`BertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. Examples: ```python >>> from transformers import BertConfig, BertModel >>> # Initializing a BERT google-bert/bert-base-uncased style configuration >>> configuration = BertConfig() >>> # Initializing a model (with random weights) from the google-bert/bert-base-uncased style configuration >>> model = BertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "bert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.classifier_dropout = classifier_dropout __all__ = ["BertConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
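Beyond the default values documented above, any of the hyperparameters can be overridden at construction time; a short example with deliberately small, arbitrary sizes chosen only for illustration:

```python
from transformers import BertConfig, BertModel

# A tiny configuration for quick experiments - the sizes here are arbitrary.
small_config = BertConfig(
    hidden_size=128,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=512,
)
model = BertModel(small_config)
print(sum(p.numel() for p in model.parameters()))  # rough parameter count of the tiny model
```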
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/convnext/image_processing_convnext.py
src/transformers/models/convnext/image_processing_convnext.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ConvNeXT.""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...processing_utils import ImagesKwargs from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging from ...utils.import_utils import requires if is_vision_available(): import PIL logger = logging.get_logger(__name__) class ConvNextImageProcessorKwargs(ImagesKwargs, total=False): """ crop_pct (`float`, *optional*): Percentage of the image to crop. Only has an effect if size < 384. Can be overridden by `crop_pct` in the`preprocess` method. """ crop_pct: float @requires(backends=("vision",)) class ConvNextImageProcessor(BaseImageProcessor): r""" Constructs a ConvNeXT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`): Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can be overridden by `size` in the `preprocess` method. crop_pct (`float` *optional*, defaults to 224 / 256): Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be overridden by `crop_pct` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. 
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] valid_kwargs = ConvNextImageProcessorKwargs def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, crop_pct: Optional[float] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 384} size = get_size_dict(size, default_to_square=False) self.do_resize = do_resize self.size = size # Default value set here for backwards compatibility where the value in config is None self.crop_pct = crop_pct if crop_pct is not None else 224 / 256 self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. crop_pct (`float`): Percentage of the image to crop. Only has an effect if size < 384. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" not in size: raise ValueError(f"Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}") shortest_edge = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct resize_shortest_edge = int(shortest_edge / crop_pct) resize_size = get_resize_output_image_size( image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format ) image = resize( image=image, size=resize_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # then crop to (shortest_edge, shortest_edge) return center_crop( image=image, size=(shortest_edge, shortest_edge), data_format=data_format, input_data_format=input_data_format, **kwargs, ) else: # warping (no cropping) when evaluated at 384 or larger return resize( image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, crop_pct: Optional[float] = None, resample: Optional[PILImageResampling] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. crop_pct (`float`, *optional*, defaults to `self.crop_pct`): Percentage of the image to crop if size < 384. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. 
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize crop_pct = crop_pct if crop_pct is not None else self.crop_pct resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize( image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format ) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) __all__ = ["ConvNextImageProcessor"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
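The `ConvNextImageProcessor` above resizes with a `crop_pct` rule: when `size["shortest_edge"]` is below 384, the shorter side is first matched to `int(shortest_edge / crop_pct)` and the result is center-cropped to a square; at 384 and above the image is warped directly to a square. A minimal usage sketch (dummy image, default `crop_pct = 224 / 256`) that exercises both branches:

```python
from PIL import Image

from transformers import ConvNextImageProcessor

# Dummy 640x480 RGB image; pixel content does not matter for a shape check.
image = Image.new("RGB", (640, 480))

# shortest_edge=224 < 384: resize the short side to int(224 / (224 / 256)) = 256,
# then center-crop to 224x224.
processor = ConvNextImageProcessor(size={"shortest_edge": 224})
print(processor(images=image, return_tensors="pt")["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])

# shortest_edge=384 takes the warp-only branch: no crop, direct resize to 384x384.
processor_384 = ConvNextImageProcessor(size={"shortest_edge": 384})
print(processor_384(images=image, return_tensors="pt")["pixel_values"].shape)  # torch.Size([1, 3, 384, 384])
```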
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/convnext/image_processing_convnext_fast.py
src/transformers/models/convnext/image_processing_convnext_fast.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for ConvNeXT.""" from typing import Optional, Union import torch from torchvision.transforms.v2 import functional as F from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import BaseImageProcessorFast, group_images_by_shape, reorder_images from ...image_transforms import get_resize_output_image_size from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, ) from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, ) from .image_processing_convnext import ConvNextImageProcessorKwargs @auto_docstring class ConvNextImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"shortest_edge": 384} default_to_square = False do_resize = True do_rescale = True do_normalize = True crop_pct = 224 / 256 valid_kwargs = ConvNextImageProcessorKwargs def __init__(self, **kwargs: Unpack[ConvNextImageProcessorKwargs]): super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[ConvNextImageProcessorKwargs]) -> BatchFeature: return super().preprocess(images, **kwargs) def resize( self, image: "torch.Tensor", size: dict[str, int], crop_pct: float, interpolation: PILImageResampling = PILImageResampling.BICUBIC, **kwargs, ) -> "torch.Tensor": """ Resize an image. Args: image (`torch.Tensor`): Image to resize. size (`dict[str, int]`): Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. crop_pct (`float`): Percentage of the image to crop. Only has an effect if size < 384. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. Returns: `torch.Tensor`: Resized image. """ if not size.shortest_edge: raise ValueError(f"Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}") shortest_edge = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct resize_shortest_edge = int(shortest_edge / crop_pct) resize_size = get_resize_output_image_size( image, size=resize_shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST ) image = super().resize( image, SizeDict(height=resize_size[0], width=resize_size[1]), interpolation=interpolation, **kwargs, ) # then crop to (shortest_edge, shortest_edge) return self.center_crop( image, SizeDict(height=shortest_edge, width=shortest_edge), **kwargs, ) else: # warping (no cropping) when evaluated at 384 or larger return super().resize( image, SizeDict(height=shortest_edge, width=shortest_edge), interpolation=interpolation, **kwargs, ) def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: dict[str, int], crop_pct: float, interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: int, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize( image=stacked_images, size=size, crop_pct=crop_pct, interpolation=interpolation ) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_center_crop: stacked_images = self.center_crop(stacked_images, crop_size) # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["ConvNextImageProcessorFast"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
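`ConvNextImageProcessorFast` applies the same `shortest_edge` / `crop_pct` rule on `torch.Tensor` inputs, and batches the work by grouping same-shaped images, transforming each stack once, then restoring the original order. The snippet below is a simplified illustration of that grouping idea with hypothetical helper names; it is not the actual `group_images_by_shape` / `reorder_images` implementation:

```python
import torch


def group_by_shape(images: list[torch.Tensor]) -> tuple[dict, list]:
    """Stack same-shaped images together and remember where each input came from."""
    grouped: dict[tuple, list[torch.Tensor]] = {}
    index = []  # (shape, position within that shape's stack) for every input image
    for image in images:
        shape = tuple(image.shape)
        grouped.setdefault(shape, []).append(image)
        index.append((shape, len(grouped[shape]) - 1))
    stacked = {shape: torch.stack(group) for shape, group in grouped.items()}
    return stacked, index


def reorder(processed: dict, index: list) -> list[torch.Tensor]:
    """Undo the grouping so outputs line up with the original input order."""
    return [processed[shape][position] for shape, position in index]


images = [torch.rand(3, 480, 640), torch.rand(3, 224, 224), torch.rand(3, 480, 640)]
stacked, index = group_by_shape(images)
# A real processor would resize/rescale/normalize each stack here; we keep it as a no-op.
restored = reorder(stacked, index)
print([tuple(t.shape) for t in restored])  # [(3, 480, 640), (3, 224, 224), (3, 480, 640)]
```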
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/convnext/modeling_convnext.py
src/transformers/models/convnext/modeling_convnext.py
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ConvNext model.""" from typing import Optional import torch from torch import nn from ... import initialization as init from ...activations import ACT2FN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ...utils.backbone_utils import BackboneMixin from ...utils.generic import can_return_tuple from .configuration_convnext import ConvNextConfig logger = logging.get_logger(__name__) # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNext class ConvNextDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class ConvNextLayerNorm(nn.LayerNorm): r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). 
""" def __init__(self, normalized_shape, *, eps=1e-6, data_format="channels_last", **kwargs): super().__init__(normalized_shape, eps=eps, **kwargs) if data_format not in ["channels_last", "channels_first"]: raise NotImplementedError(f"Unsupported data format: {data_format}") self.data_format = data_format def forward(self, features: torch.Tensor) -> torch.Tensor: """ Args: features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels) """ if self.data_format == "channels_first": features = features.permute(0, 2, 3, 1) features = super().forward(features) features = features.permute(0, 3, 1, 2) else: features = super().forward(features) return features class ConvNextEmbeddings(nn.Module): """This class is comparable to (and inspired by) the SwinEmbeddings class found in src/transformers/models/swin/modeling_swin.py. """ def __init__(self, config): super().__init__() self.patch_embeddings = nn.Conv2d( config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size ) self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first") self.num_channels = config.num_channels def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.patch_embeddings(pixel_values) embeddings = self.layernorm(embeddings) return embeddings class ConvNextLayer(nn.Module): """This corresponds to the `Block` class in the original implementation. There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C, H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back The authors used (2) as they find it slightly faster in PyTorch. Args: config ([`ConvNextConfig`]): Model configuration class. dim (`int`): Number of input channels. drop_path (`float`): Stochastic depth rate. Default: 0.0. """ def __init__(self, config, dim, drop_path=0): super().__init__() self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv self.layernorm = ConvNextLayerNorm(dim, eps=1e-6) self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers self.act = ACT2FN[config.hidden_act] self.pwconv2 = nn.Linear(4 * dim, dim) self.layer_scale_parameter = ( nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True) if config.layer_scale_init_value > 0 else None ) self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, features: torch.Tensor) -> torch.Tensor: residual = features features = self.dwconv(features) features = features.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) features = self.layernorm(features) features = self.pwconv1(features) features = self.act(features) features = self.pwconv2(features) if self.layer_scale_parameter is not None: features = self.layer_scale_parameter * features features = features.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) features = residual + self.drop_path(features) return features class ConvNextStage(nn.Module): """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks. Args: config ([`ConvNextConfig`]): Model configuration class. in_channels (`int`): Number of input channels. 
out_channels (`int`): Number of output channels. depth (`int`): Number of residual blocks. drop_path_rates(`list[float]`): Stochastic depth rates for each layer. """ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None): super().__init__() if in_channels != out_channels or stride > 1: self.downsampling_layer = nn.ModuleList( [ ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"), nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride), ] ) else: self.downsampling_layer = nn.ModuleList() drop_path_rates = drop_path_rates or [0.0] * depth self.layers = nn.ModuleList( [ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)] ) def forward(self, features: torch.Tensor) -> torch.Tensor: for layer in self.downsampling_layer: features = layer(features) for layer in self.layers: features = layer(features) return features class ConvNextEncoder(nn.Module): def __init__(self, config): super().__init__() self.stages = nn.ModuleList() drop_path_rates = [ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu").split(config.depths) ] prev_chs = config.hidden_sizes[0] for i in range(config.num_stages): out_chs = config.hidden_sizes[i] stage = ConvNextStage( config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i], ) self.stages.append(stage) prev_chs = out_chs def forward( self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool] = False ) -> BaseModelOutputWithNoAttention: all_hidden_states = [hidden_states] if output_hidden_states else None for layer_module in self.stages: hidden_states = layer_module(hidden_states) if all_hidden_states is not None: all_hidden_states.append(hidden_states) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) @auto_docstring class ConvNextPreTrainedModel(PreTrainedModel): config: ConvNextConfig base_model_prefix = "convnext" main_input_name = "pixel_values" input_modalities = ("image",) _no_split_modules = ["ConvNextLayer"] _can_record_outputs = {} # hidden states are collected explicitly @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" super()._init_weights(module) if isinstance(module, ConvNextLayer): if module.layer_scale_parameter is not None: init.constant_(module.layer_scale_parameter, self.config.layer_scale_init_value) @auto_docstring class ConvNextModel(ConvNextPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = ConvNextEmbeddings(config) self.encoder = ConvNextEncoder(config) # final layernorm layer self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, **kwargs ) -> BaseModelOutputWithPoolingAndNoAttention: if output_hidden_states is None: output_hidden_states = self.config.output_hidden_states if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs: BaseModelOutputWithNoAttention = self.encoder( embedding_output, output_hidden_states=output_hidden_states ) last_hidden_state = encoder_outputs.last_hidden_state # global average pooling, (N, C, H, 
W) -> (N, C) pooled_output = self.layernorm(last_hidden_state.mean([-2, -1])) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @auto_docstring( custom_intro=""" ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """ ) class ConvNextForImageClassification(ConvNextPreTrainedModel): accepts_loss_kwargs = False def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.convnext = ConvNextModel(config) # Classifier head if config.num_labels > 0: self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) else: self.classifier = nn.Identity() # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, **kwargs ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs: BaseModelOutputWithPoolingAndNoAttention = self.convnext(pixel_values, **kwargs) pooled_output = outputs.pooler_output logits = self.classifier(pooled_output) loss = None if labels is not None: loss = self.loss_function(labels=labels, pooled_logits=logits, config=self.config) return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) @auto_docstring( custom_intro=""" ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer. 
""" ) class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin): has_attentions = False def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.embeddings = ConvNextEmbeddings(config) self.encoder = ConvNextEncoder(config) self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes # Add layer norms to hidden states of out_features hidden_states_norms = {} for stage, num_channels in zip(self._out_features, self.channels): hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format="channels_first") self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) # initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, **kwargs ) -> BackboneOutput: r""" Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224") >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) ```""" if output_hidden_states is None: output_hidden_states = self.config.output_hidden_states embedding_output = self.embeddings(pixel_values) outputs: BaseModelOutputWithPoolingAndNoAttention = self.encoder(embedding_output, output_hidden_states=True) hidden_states = outputs.hidden_states feature_maps = [] for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: hidden_state = self.hidden_states_norms[stage](hidden_state) feature_maps.append(hidden_state) return BackboneOutput( feature_maps=tuple(feature_maps), hidden_states=hidden_states if output_hidden_states else None, ) __all__ = ["ConvNextForImageClassification", "ConvNextModel", "ConvNextPreTrainedModel", "ConvNextBackbone"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
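In `modeling_convnext.py`, the stem downsamples by `patch_size` (4 by default) and each of the three later stages halves the resolution again, so a 224x224 input ends up as a 7x7 feature map with `hidden_sizes[-1]` channels before `ConvNextModel` applies global average pooling. A quick shape check with a randomly initialized model (no checkpoint download needed):

```python
import torch

from transformers import ConvNextConfig, ConvNextModel

config = ConvNextConfig()  # depths [3, 3, 9, 3], hidden_sizes [96, 192, 384, 768]
model = ConvNextModel(config).eval()  # random weights

pixel_values = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values, output_hidden_states=True)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 768, 7, 7]): 224 / 4 / 2 / 2 / 2 = 7
print(outputs.pooler_output.shape)      # torch.Size([1, 768]): global average pool + final layernorm
print(len(outputs.hidden_states))       # 5: embedding output plus one entry per stage
```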
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/convnext/configuration_convnext.py
src/transformers/models/convnext/configuration_convnext.py
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ConvNeXT model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class ConvNextConfig(BackboneConfigMixin, PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate an ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ConvNeXT [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. patch_size (`int`, *optional*, defaults to 4): Patch size to use in the patch embedding layer. num_stages (`int`, *optional*, defaults to 4): The number of stages in the model. hidden_sizes (`list[int]`, *optional*, defaults to [96, 192, 384, 768]): Dimensionality (hidden size) at each stage. depths (`list[int]`, *optional*, defaults to [3, 3, 9, 3]): Depth (number of blocks) for each stage. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. layer_scale_init_value (`float`, *optional*, defaults to 1e-6): The initial value for the layer scale. drop_path_rate (`float`, *optional*, defaults to 0.0): The drop rate for stochastic depth. out_features (`list[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. 
Example: ```python >>> from transformers import ConvNextConfig, ConvNextModel >>> # Initializing a ConvNext convnext-tiny-224 style configuration >>> configuration = ConvNextConfig() >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration >>> model = ConvNextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "convnext" def __init__( self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, layer_scale_init_value=1e-6, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.num_channels = num_channels self.patch_size = patch_size self.num_stages = num_stages self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes self.depths = [3, 3, 9, 3] if depths is None else depths self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.image_size = image_size self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) __all__ = ["ConvNextConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
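`ConvNextConfig` aligns `out_features` and `out_indices` against `stage_names` (derived from `depths`) for backbone use, defaulting to the last stage when neither is given. A small sketch of how these fields line up:

```python
from transformers import ConvNextConfig

config = ConvNextConfig()
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # defaults to the last stage, i.e. ['stage4']

# For backbone use, intermediate stages can be requested explicitly;
# out_indices is then aligned to the positions of those names in stage_names.
backbone_config = ConvNextConfig(out_features=["stage2", "stage3", "stage4"])
print(backbone_config.out_features, backbone_config.out_indices)
```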
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/convnext/__init__.py
src/transformers/models/convnext/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_convnext import * from .feature_extraction_convnext import * from .image_processing_convnext import * from .image_processing_convnext_fast import * from .modeling_convnext import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
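The package `__init__` routes everything through `_LazyModule`, so submodules such as `modeling_convnext` are only imported when one of their attributes is first accessed. As a rough illustration of the underlying idea (PEP 562 module-level `__getattr__`), not the actual `_LazyModule` mechanics, a package `__init__.py` could defer imports like this:

```python
# Hypothetical lazy __init__.py sketch -- not the transformers implementation.
import importlib

_LAZY_ATTRS = {
    "ConvNextConfig": ".configuration_convnext",
    "ConvNextModel": ".modeling_convnext",
}


def __getattr__(name):
    # Import the owning submodule only when the attribute is actually requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], package=__name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```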
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/convnext/convert_convnext_to_pytorch.py
src/transformers/models/convnext/convert_convnext_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ConvNext checkpoints from the original repository. URL: https://github.com/facebookresearch/ConvNeXt""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_convnext_config(checkpoint_url): config = ConvNextConfig() if "tiny" in checkpoint_url: depths = [3, 3, 9, 3] hidden_sizes = [96, 192, 384, 768] if "small" in checkpoint_url: depths = [3, 3, 27, 3] hidden_sizes = [96, 192, 384, 768] if "base" in checkpoint_url: depths = [3, 3, 27, 3] hidden_sizes = [128, 256, 512, 1024] if "large" in checkpoint_url: depths = [3, 3, 27, 3] hidden_sizes = [192, 384, 768, 1536] if "xlarge" in checkpoint_url: depths = [3, 3, 27, 3] hidden_sizes = [256, 512, 1024, 2048] if "1k" in checkpoint_url: num_labels = 1000 filename = "imagenet-1k-id2label.json" expected_shape = (1, 1000) else: num_labels = 21841 filename = "imagenet-22k-id2label.json" expected_shape = (1, 21841) repo_id = "huggingface/label-files" config.num_labels = num_labels id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} if "1k" not in checkpoint_url: # this dataset contains 21843 labels but the model only has 21841 # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18 del id2label[9205] del id2label[15027] config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} config.hidden_sizes = hidden_sizes config.depths = depths return config, expected_shape def rename_key(name): if "downsample_layers.0.0" in name: name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings") if "downsample_layers.0.1" in name: name = name.replace("downsample_layers.0.1", "embeddings.norm") # we rename to layernorm later on if "downsample_layers.1.0" in name: name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0") if "downsample_layers.1.1" in name: name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1") if "downsample_layers.2.0" in name: name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0") if "downsample_layers.2.1" in name: name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1") if "downsample_layers.3.0" in name: name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0") if "downsample_layers.3.1" in name: name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1") if "stages" in name and "downsampling_layer" not in name: # stages.0.0. for instance should be renamed to stages.0.layers.0. 
name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :] if "stages" in name: name = name.replace("stages", "encoder.stages") if "norm" in name: name = name.replace("norm", "layernorm") if "gamma" in name: name = name.replace("gamma", "layer_scale_parameter") if "head" in name: name = name.replace("head", "classifier") return name # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our ConvNext structure. """ # define ConvNext configuration based on URL config, expected_shape = get_convnext_config(checkpoint_url) # load original state_dict from URL state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"] # rename keys for key in state_dict.copy(): val = state_dict.pop(key) state_dict[rename_key(key)] = val # add prefix to all keys expect classifier head for key in state_dict.copy(): val = state_dict.pop(key) if not key.startswith("classifier"): key = "convnext." + key state_dict[key] = val # load HuggingFace model model = ConvNextForImageClassification(config) model.load_state_dict(state_dict) model.eval() # Check outputs on an image, prepared by ConvNextImageProcessor size = 224 if "224" in checkpoint_url else 384 image_processor = ConvNextImageProcessor(size=size) pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values logits = model(pixel_values).logits # note: the logits below were obtained without center cropping if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth": expected_logits = torch.tensor([-0.1210, -0.6605, 0.1918]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth": expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth": expected_logits = torch.tensor([0.4525, 0.7539, 0.0308]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth": expected_logits = torch.tensor([0.3561, 0.6350, -0.0384]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth": expected_logits = torch.tensor([0.4174, -0.0989, 0.1489]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth": expected_logits = torch.tensor([0.2513, -0.1349, -0.1613]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth": expected_logits = torch.tensor([1.2980, 0.3631, -0.1198]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth": expected_logits = torch.tensor([1.2963, 0.1227, 0.1723]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth": expected_logits = torch.tensor([1.7956, 0.8390, 0.2820]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth": expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth": expected_logits = torch.tensor([-0.5672, -0.0730, -0.4348]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth": expected_logits = torch.tensor([0.2681, 0.2365, 0.6246]) elif checkpoint_url == 
"https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth": expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth": expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth": expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444]) else: raise ValueError(f"Unknown URL: {checkpoint_url}") assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3) assert logits.shape == expected_shape Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) print("Pushing model to the hub...") model_name = "convnext" if "tiny" in checkpoint_url: model_name += "-tiny" elif "small" in checkpoint_url: model_name += "-small" elif "base" in checkpoint_url: model_name += "-base" elif "xlarge" in checkpoint_url: model_name += "-xlarge" elif "large" in checkpoint_url: model_name += "-large" if "224" in checkpoint_url: model_name += "-224" elif "384" in checkpoint_url: model_name += "-384" if "22k" in checkpoint_url and "1k" not in checkpoint_url: model_name += "-22k" if "22k" in checkpoint_url and "1k" in checkpoint_url: model_name += "-22k-1k" model.push_to_hub(repo_id=f"nielsr/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth", type=str, help="URL of the original ConvNeXT checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model directory.", ) args = parser.parse_args() convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
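The conversion script's `rename_key` maps original ConvNeXt checkpoint names onto the HF module layout (patch embeddings, per-stage `downsampling_layer` / `layers`, `layernorm`, `layer_scale_parameter`, `classifier`). Below is a trimmed re-implementation covering a few representative keys, for illustration only; the real script additionally prefixes every non-classifier key with `convnext.`:

```python
import re


def rename_key_sketch(name: str) -> str:
    """Simplified version of the renaming rules above (illustrative, not exhaustive)."""
    name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
    name = name.replace("downsample_layers.0.1", "embeddings.norm")  # becomes embeddings.layernorm below
    name = re.sub(r"downsample_layers\.(\d+)\.(\d+)", r"stages.\1.downsampling_layer.\2", name)
    if name.startswith("stages.") and "downsampling_layer" not in name:
        # e.g. stages.2.5.gamma -> stages.2.layers.5.gamma
        name = re.sub(r"^(stages\.\d+)\.", r"\1.layers.", name)
    name = name.replace("stages", "encoder.stages")
    name = name.replace("norm", "layernorm")
    name = name.replace("gamma", "layer_scale_parameter")
    name = name.replace("head", "classifier")
    return name


for key in ["downsample_layers.0.0.weight", "downsample_layers.2.1.bias", "stages.2.5.gamma", "norm.weight", "head.bias"]:
    print(key, "->", rename_key_sketch(key))
```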
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/groupvit/modeling_groupvit.py
src/transformers/models/groupvit/modeling_groupvit.py
# coding=utf-8 # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch GroupViT model.""" import collections.abc from dataclasses import dataclass from typing import Any, Optional, Union import numpy as np import torch from torch import nn from ... import initialization as init from ...activations import ACT2FN from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig logger = logging.get_logger(__name__) # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 def hard_softmax(logits: torch.Tensor, dim: int): y_soft = logits.softmax(dim) # Straight through. index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft return ret def gumbel_softmax(logits: torch.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> torch.Tensor: # more stable https://github.com/pytorch/pytorch/issues/41663 gumbel_dist = torch.distributions.gumbel.Gumbel( torch.tensor(0.0, device=logits.device, dtype=logits.dtype), torch.tensor(1.0, device=logits.device, dtype=logits.dtype), ) gumbels = gumbel_dist.sample(logits.shape) gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau) y_soft = gumbels.softmax(dim) if hard: # Straight through. index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: # Reparameterization trick. ret = y_soft return ret def resize_attention_map(attentions, height, width, align_corners=False): """ Args: attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width] height (`int`): height of the output attention map width (`int`): width of the output attention map align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`. 
Returns: `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width] """ scale = (height * width // attentions.shape[2]) ** 0.5 if height > width: feat_width = int(np.round(width / scale)) feat_height = attentions.shape[2] // feat_width else: feat_height = int(np.round(height / scale)) feat_width = attentions.shape[2] // feat_height batch_size = attentions.shape[0] groups = attentions.shape[1] # number of group token # [batch_size, groups, height*width, groups] -> [batch_size, groups, height, width] attentions = attentions.reshape(batch_size, groups, feat_height, feat_width) attentions = nn.functional.interpolate( attentions, size=(height, width), mode="bilinear", align_corners=align_corners ) return attentions def get_grouping_from_attentions(attentions, hw_shape): """ Args: attentions (`tuple(torch.FloatTensor)`: tuple of attention maps returned by `GroupViTVisionTransformer` hw_shape (`tuple(int)`): height and width of the output attention map Returns: `torch.Tensor`: the attention map of shape [batch_size, groups, height, width] """ attn_maps = [] with torch.no_grad(): prev_attn_masks = None for attn_masks in attentions: # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups] attn_masks = attn_masks.permute(0, 2, 1).contiguous() if prev_attn_masks is None: prev_attn_masks = attn_masks else: prev_attn_masks = prev_attn_masks @ attn_masks # [batch_size, heightxwidth, num_groups] -> [batch_size, num_groups, heightxwidth] -> [batch_size, num_groups, height, width] cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape) attn_maps.append(cur_attn_map) # [batch_size, num_groups, height, width] final_grouping = attn_maps[-1] return final_grouping class GroupViTCrossAttentionLayer(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.attn = GroupViTAttention(config) self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = GroupViTMLP(config) self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, query, key): x = query x = x + self.attn(query, encoder_hidden_states=key)[0] x = x + self.mlp(self.norm2(x)) x = self.norm_post(x) return x class GroupViTAssignAttention(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.scale = config.hidden_size**-0.5 self.q_proj = nn.Linear(config.hidden_size, config.hidden_size) self.k_proj = nn.Linear(config.hidden_size, config.hidden_size) self.v_proj = nn.Linear(config.hidden_size, config.hidden_size) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.assign_eps = config.assign_eps def get_attn(self, attn, gumbel=True, hard=True): if gumbel and self.training: attn = gumbel_softmax(attn, dim=-2, hard=hard) else: if hard: attn = hard_softmax(attn, dim=-2) else: attn = nn.functional.softmax(attn, dim=-2) return attn def forward(self, query, key): value = key # [batch_size, query_length, channels] query = self.q_proj(query) # [batch_size, key_length, channels] key = self.k_proj(key) # [batch_size, key_length, channels] value = self.v_proj(value) # [batch_size, query_length, key_length] raw_attn = (query @ key.transpose(-2, -1)) * self.scale attn = self.get_attn(raw_attn) soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False) attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps) out = attn @ value out = self.proj(out) return out, soft_attn class GroupViTTokenAssign(nn.Module): def __init__(self, config: 
GroupViTVisionConfig, num_group_token, num_output_group): super().__init__() self.num_output_group = num_output_group # norm on group_tokens self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) assign_mlp_ratio = ( config.assign_mlp_ratio if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) else (config.assign_mlp_ratio, config.assign_mlp_ratio) ) tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio] self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group) self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # norm on x self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pre_assign_attn = GroupViTCrossAttentionLayer(config) self.assign = GroupViTAssignAttention(config) self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size) def project_group_token(self, group_tokens): """ Args: group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels] Returns: projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels] """ # [B, num_output_groups, C] <- [B, num_group_tokens, C] projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens def forward(self, image_tokens, group_tokens): """ Args: image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels] group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels] """ group_tokens = self.norm_tokens(group_tokens) image_tokens = self.norm_x(image_tokens) # [batch_size, num_output_groups, channels] projected_group_tokens = self.project_group_token(group_tokens) projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens) new_image_tokens += projected_group_tokens new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) return new_image_tokens, attention @dataclass @auto_docstring class GroupViTModelOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`GroupViTTextModel`]. 
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`GroupViTVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`GroupViTTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`GroupViTVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: Optional[torch.FloatTensor] = None logits_per_text: Optional[torch.FloatTensor] = None segmentation_logits: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class GroupViTPatchEmbeddings(nn.Module): """ Image to Patch Embedding. """ def __init__( self, image_size: int = 224, patch_size: Union[int, tuple[int, int]] = 16, num_channels: int = 3, embed_dim: int = 768, ): super().__init__() image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) x = self.projection(pixel_values).flatten(2).transpose(1, 2) return x class GroupViTVisionEmbeddings(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.patch_embeddings = GroupViTPatchEmbeddings( image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size, ) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size)) self.dropout = nn.Dropout(config.dropout) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.patch_size = config.patch_size self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing and no class embeddings. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] num_positions = self.position_embeddings.shape[1] # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return patch_pos_embed def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) embeddings = self.layernorm(embeddings) batch_size, seq_len, _ = embeddings.size() # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->GroupViT class GroupViTTextEmbeddings(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] max_position_embedding = self.position_embedding.weight.shape[0] if seq_length > max_position_embedding: raise ValueError( f"Sequence length must be less than max_position_embeddings (got `sequence length`: " f"{seq_length} and max_position_embeddings: {max_position_embedding}" ) if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class GroupViTStage(nn.Module): """This corresponds to the `GroupingLayer` class in the GroupViT implementation.""" def __init__( self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int, ): super().__init__() self.depth = depth self.num_group_token = num_group_token if num_group_token > 0: self.group_token = 
nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size)) else: self.group_token = None self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)]) if num_group_token > 0: self.downsample = GroupViTTokenAssign( config=config, num_group_token=num_group_token, num_output_group=num_output_group, ) else: self.downsample = None if num_prev_group_token > 0 and num_group_token > 0: self.group_projector = nn.Sequential( nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps), GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token), ) else: self.group_projector = None @property def with_group_token(self): return self.group_token is not None def split_x(self, x): if self.with_group_token: return x[:, : -self.num_group_token], x[:, -self.num_group_token :] else: return x, None def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor] = None) -> torch.Tensor: if group_token is None: return x return torch.cat([x, group_token], dim=1) def forward( self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the grouping tensors of Grouping block. """ if self.with_group_token: group_token = self.group_token.expand(hidden_states.size(0), -1, -1) if self.group_projector is not None: group_token = group_token + self.group_projector(prev_group_token) else: group_token = None x = hidden_states cat_x = self.concat_x(x, group_token) for layer in self.layers: layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None) cat_x = layer_out[0] x, group_token = self.split_x(cat_x) attention = None if self.downsample is not None: x, attention = self.downsample(x, group_token) outputs = (x, group_token) if output_attentions: outputs = outputs + (attention,) return outputs class GroupViTMLP(nn.Module): def __init__( self, config: GroupViTVisionConfig, hidden_size: Optional[int] = None, intermediate_size: Optional[int] = None, output_size: Optional[int] = None, ): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] hidden_size = hidden_size if hidden_size is not None else config.hidden_size intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size output_size = output_size if output_size is not None else hidden_size self.fc1 = nn.Linear(hidden_size, intermediate_size) self.fc2 = nn.Linear(intermediate_size, output_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class GroupViTMixerMLP(GroupViTMLP): def forward(self, x): x = super().forward(x.transpose(1, 2)) return x.transpose(1, 2) class GroupViTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != 
self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() is_cross_attention = encoder_hidden_states is not None # get query proj query_states = self.q_proj(hidden_states) * self.scale if is_cross_attention: key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz) value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {causal_attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer with AltCLIP->GroupViT class GroupViTEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: GroupViTConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = GroupViTAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = GroupViTMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs @auto_docstring class GroupViTPreTrainedModel(PreTrainedModel): config: GroupViTConfig base_model_prefix = "groupvit" input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" init_range = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): init.normal_(module.weight, mean=0.0, std=init_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)): init.zeros_(module.bias) init.ones_(module.weight) if getattr(module, "running_mean", None) is not None: init.zeros_(module.running_mean) init.ones_(module.running_var) init.zeros_(module.num_batches_tracked) factor = self.config.initializer_factor if isinstance(module, GroupViTTextEmbeddings): init.normal_(module.token_embedding.weight, mean=0.0, std=factor * 0.02) init.normal_(module.position_embedding.weight, mean=0.0, std=factor * 0.02) init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1))) elif isinstance(module, GroupViTAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor init.normal_(module.q_proj.weight, std=in_proj_std) init.normal_(module.k_proj.weight, std=in_proj_std) init.normal_(module.v_proj.weight, std=in_proj_std) init.normal_(module.out_proj.weight, std=out_proj_std)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
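The `interpolate_pos_encoding` method in the vision embeddings above resizes the learned patch position embeddings with bicubic interpolation so the model can run on resolutions other than the pre-training one. The following standalone sketch reproduces just that reshape/interpolate/flatten sequence with plain tensors; the grid size, hidden dimension, and target resolution are illustrative toy values, not anything tied to a released checkpoint.

```python
import torch
import torch.nn as nn

# Toy setup: a 14x14 grid of 16x16 patches -> 196 learned position embeddings of dim 384.
dim, patch_size = 384, 16
position_embeddings = torch.randn(1, 196, dim)

# Hypothetical target image of 480x320 pixels -> 30x20 patch grid.
new_height, new_width = 480 // patch_size, 320 // patch_size

grid = int(position_embeddings.shape[1] ** 0.5)        # 14
pos = position_embeddings.reshape(1, grid, grid, dim)  # (1, 14, 14, dim)
pos = pos.permute(0, 3, 1, 2)                          # (1, dim, 14, 14)
pos = nn.functional.interpolate(
    pos, size=(new_height, new_width), mode="bicubic", align_corners=False
)
pos = pos.permute(0, 2, 3, 1).view(1, -1, dim)         # (1, 30 * 20, dim)
print(pos.shape)  # torch.Size([1, 600, 384])
```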
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/groupvit/configuration_groupvit.py
src/transformers/models/groupvit/configuration_groupvit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GroupViT model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class GroupViTTextConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate an GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GroupViTModel`]. hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 1024): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 77): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). 
Example: ```python >>> from transformers import GroupViTTextConfig, GroupViTTextModel >>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration >>> configuration = GroupViTTextConfig() >>> model = GroupViTTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "groupvit_text_model" base_config_key = "text_config" def __init__( self, vocab_size=49408, hidden_size=256, intermediate_size=1024, num_hidden_layers=12, num_attention_heads=4, max_position_embeddings=77, hidden_act="quick_gelu", layer_norm_eps=1e-5, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.dropout = dropout self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout class GroupViTVisionConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate an GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 384): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 1536): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. depths (`list[int]`, *optional*, defaults to [6, 3, 3]): The number of layers in each encoder block. num_group_tokens (`list[int]`, *optional*, defaults to [64, 8, 0]): The number of group tokens for each stage. num_output_groups (`list[int]`, *optional*, defaults to [64, 8, 8]): The number of output groups for each stage, 0 means no group. num_attention_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import GroupViTVisionConfig, GroupViTVisionModel >>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration >>> configuration = GroupViTVisionConfig() >>> model = GroupViTVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "groupvit_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=384, intermediate_size=1536, depths=[6, 3, 3], num_hidden_layers=12, num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=6, image_size=224, patch_size=16, num_channels=3, hidden_act="gelu", layer_norm_eps=1e-5, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, assign_eps=1.0, assign_mlp_ratio=[0.5, 4], **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.depths = depths if num_hidden_layers != sum(depths): logger.warning( f"Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers =" f" sum(depth) = {sum(depths)}" ) self.num_hidden_layers = num_hidden_layers self.num_group_tokens = num_group_tokens self.num_output_groups = num_output_groups self.num_attention_heads = num_attention_heads self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.assign_eps = assign_eps self.assign_mlp_ratio = assign_mlp_ratio class GroupViTConfig(PreTrainedConfig): r""" [`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to instantiate a GroupViT model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`GroupViTTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`GroupViTVisionConfig`]. projection_dim (`int`, *optional*, defaults to 256): Dimensionality of text and vision projection layers. projection_intermediate_dim (`int`, *optional*, defaults to 4096): Dimensionality of intermediate layer of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original GroupViT implementation. kwargs (*optional*): Dictionary of keyword arguments. 
""" model_type = "groupvit" sub_configs = {"text_config": GroupViTTextConfig, "vision_config": GroupViTVisionConfig} def __init__( self, text_config=None, vision_config=None, projection_dim=256, projection_intermediate_dim=4096, logit_scale_init_value=2.6592, **kwargs, ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). text_config_dict = kwargs.pop("text_config_dict", None) vision_config_dict = kwargs.pop("vision_config_dict", None) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: text_config = {} # This is the complete result when using `text_config_dict`. _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key != "transformers_version": # If specified in `text_config_dict` if key in text_config_dict: message = ( f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " f'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. " f'The value `text_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} # This is the complete result when using `vision_config_dict`. _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _vision_config_dict["id2label"] = { str(key): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key != "transformers_version": # If specified in `vision_config_dict` if key in vision_config_dict: message = ( f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different " f'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`." f' The value `vision_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict) if text_config is None: text_config = GroupViTTextConfig() logger.info("`text_config` is `None`. 
initializing the `GroupViTTextConfig` with default values.") elif isinstance(text_config, dict): text_config = GroupViTTextConfig(**text_config) if vision_config is None: vision_config = GroupViTVisionConfig() logger.info("`vision_config` is `None`. initializing the `GroupViTVisionConfig` with default values.") elif isinstance(vision_config, dict): vision_config = GroupViTVisionConfig(**vision_config) self.text_config = text_config self.vision_config = vision_config self.projection_dim = projection_dim self.projection_intermediate_dim = projection_intermediate_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_range = 0.02 self.initializer_factor = 1.0 self.output_segmentation = False super().__init__(**kwargs) __all__ = ["GroupViTConfig", "GroupViTTextConfig", "GroupViTVisionConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
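Because the `GroupViTConfig` constructor above accepts its sub-configurations as plain dictionaries (upgrading them to `GroupViTTextConfig` / `GroupViTVisionConfig` internally), a composite configuration can be assembled along these lines. This is a minimal sketch; the values shown simply restate the defaults rather than recommend settings.

```python
from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig

text_config = GroupViTTextConfig(max_position_embeddings=77)
vision_config = GroupViTVisionConfig(image_size=224, patch_size=16)

# Sub-configs may be passed as dicts; __init__ converts them back into config objects.
config = GroupViTConfig(
    text_config=text_config.to_dict(),
    vision_config=vision_config.to_dict(),
    projection_dim=256,
)
print(config.text_config.hidden_size, config.vision_config.hidden_size)  # 256 384
```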
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py
src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert GroupViT checkpoints from the original repository. URL: https://github.com/NVlabs/GroupViT """ import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def rename_key(name): # vision encoder if "img_encoder.pos_embed" in name: name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings") if "img_encoder.patch_embed.proj" in name: name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection") if "img_encoder.patch_embed.norm" in name: name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm") if "img_encoder.layers" in name: name = name.replace("img_encoder.layers", "vision_model.encoder.stages") if "blocks" in name and "res" not in name: name = name.replace("blocks", "layers") if "attn" in name and "pre_assign" not in name: name = name.replace("attn", "self_attn") if "proj" in name and "self_attn" in name and "text" not in name: name = name.replace("proj", "out_proj") if "pre_assign_attn.attn.proj" in name: name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj") if "norm1" in name: name = name.replace("norm1", "layer_norm1") if "norm2" in name and "pre_assign" not in name: name = name.replace("norm2", "layer_norm2") if "img_encoder.norm" in name: name = name.replace("img_encoder.norm", "vision_model.layernorm") # text encoder if "text_encoder.token_embedding" in name: name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding") if "text_encoder.positional_embedding" in name: name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight") if "text_encoder.transformer.resblocks." in name: name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.") if "ln_1" in name: name = name.replace("ln_1", "layer_norm1") if "ln_2" in name: name = name.replace("ln_2", "layer_norm2") if "c_fc" in name: name = name.replace("c_fc", "fc1") if "c_proj" in name: name = name.replace("c_proj", "fc2") if "text_encoder" in name: name = name.replace("text_encoder", "text_model") if "ln_final" in name: name = name.replace("ln_final", "final_layer_norm") # projection layers if "img_projector.linear_hidden." in name: name = name.replace("img_projector.linear_hidden.", "visual_projection.") if "img_projector.linear_out." 
in name: name = name.replace("img_projector.linear_out.", "visual_projection.3.") if "text_projector.linear_hidden" in name: name = name.replace("text_projector.linear_hidden", "text_projection") if "text_projector.linear_out" in name: name = name.replace("text_projector.linear_out", "text_projection.3") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors key_split = key.split(".") stage_num, layer_num = int(key_split[2]), int(key_split[4]) dim = config.vision_config.hidden_size if "weight" in key: orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight" ] = val[:dim, :] orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight" ] = val[dim : dim * 2, :] orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight" ] = val[-dim:, :] else: orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias" ] = val[:dim] orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias" ] = val[dim : dim * 2] orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias" ] = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors key_split = key.split(".") layer_num = int(key_split[3]) dim = config.text_config.hidden_size if "weight" in key: orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :] else: orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:] else: new_name = rename_key(key) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): orig_state_dict[new_name] = val.squeeze_() else: orig_state_dict[new_name] = val return orig_state_dict # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_groupvit_checkpoint( checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False ): """ Copy/paste/tweak model's weights to the Transformers design. 
""" config = GroupViTConfig() model = GroupViTModel(config).eval() state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"] new_state_dict = convert_state_dict(state_dict, config) missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0) # verify result processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") image = prepare_img() inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) if model_name == "groupvit-gcc-yfcc": expected_logits = torch.tensor([[13.3523, 6.3629]]) elif model_name == "groupvit-gcc-redcaps": expected_logits = torch.tensor([[16.1873, 8.6230]]) else: raise ValueError(f"Model name {model_name} not supported.") assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3) processor.save_pretrained(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) print("Successfully saved processor and model to", pytorch_dump_folder_path) if push_to_hub: print("Pushing to the hub...") processor.push_to_hub(repo_id=f"nielsr/{model_name}") model.push_to_hub(repo_id=f"nielsr/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model." ) parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint") parser.add_argument( "--model_name", default="groupvit-gccy-fcc", type=str, help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the Hugging Face hub using the provided `model_name`.", ) args = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
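The part of `convert_state_dict` above that needs the most care is the fused attention projection: the original GroupViT checkpoints store query, key, and value weights stacked along the output dimension (`qkv` for the vision tower, `in_proj` for the text tower), and the script slices them into three separate matrices. A self-contained illustration of that slicing with hypothetical toy tensors:

```python
import torch

# Hypothetical fused projection for a hidden size of 8: weight is (3 * dim, dim), bias is (3 * dim,).
dim = 8
qkv_weight = torch.randn(3 * dim, dim)
qkv_bias = torch.randn(3 * dim)

# Same slicing as the conversion script: rows [0, dim) -> q, [dim, 2*dim) -> k, the rest -> v.
q_w, k_w, v_w = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
q_b, k_b, v_b = qkv_bias[:dim], qkv_bias[dim : dim * 2], qkv_bias[-dim:]

# The three slices partition the fused tensors exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), qkv_bias)
```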
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/groupvit/__init__.py
src/transformers/models/groupvit/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_groupvit import *
    from .modeling_groupvit import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
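The `__init__.py` above routes the package through `_LazyModule`, so the heavy submodules are only imported when an attribute is first accessed. A small sketch of the user-facing effect; the class name comes from the configuration file above, and the comments describe the intent of the lazy-loading pattern rather than anything GroupViT-specific.

```python
# The module object returned here is a lazy proxy; configuration_groupvit and
# modeling_groupvit have not been imported yet.
from transformers.models import groupvit

# The first attribute access triggers the actual import of configuration_groupvit.
config_cls = groupvit.GroupViTConfig
print(config_cls.model_type)  # "groupvit"
```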
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py
src/transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_mm_grounding_dino.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PreTrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class MMGroundingDinoConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`MMGroundingDinoModel`]. It is used to instantiate a MM Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MM Grounding DINO tiny architecture [openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det](https://huggingface.co/openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`): The config object or dictionary of the text backbone. num_queries (`int`, *optional*, defaults to 900): Number of object queries, i.e. detection slots. This is the maximal number of objects [`MMGroundingDinoModel`] can detect in a single image. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. 
encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as an encoder/decoder or not. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. num_feature_levels (`int`, *optional*, defaults to 4): The number of input feature levels. encoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the encoder. decoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the decoder. two_stage (`bool`, *optional*, defaults to `True`): Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of Grounding DINO, which are further fed into the decoder for iterative bounding box refinement. class_cost (`float`, *optional*, defaults to 1.0): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5.0): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. bbox_loss_coefficient (`float`, *optional*, defaults to 5.0): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. disable_custom_kernels (`bool`, *optional*, defaults to `False`): Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom kernels are not supported by PyTorch ONNX export. max_text_len (`int`, *optional*, defaults to 256): The maximum length of the text input. text_enhancer_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the text enhancer. 
fusion_droppath (`float`, *optional*, defaults to 0.1): The droppath ratio for the fusion module. fusion_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the fusion module. embedding_init_target (`bool`, *optional*, defaults to `True`): Whether to initialize the target with Embedding weights. query_dim (`int`, *optional*, defaults to 4): The dimension of the query vector. positional_embedding_temperature (`float`, *optional*, defaults to 20): The temperature for Sine Positional Embedding that is used together with vision backbone. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. Examples: ```python >>> from transformers import MMGroundingDinoConfig, MMGroundingDinoModel >>> # Initializing a MM Grounding DINO configuration >>> configuration = MMGroundingDinoConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = MMGroundingDinoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mm-grounding-dino" sub_configs = {"backbone_config": AutoConfig, "text_config": AutoConfig} attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=True, class_cost=1.0, bbox_cost=5.0, giou_cost=2.0, bbox_loss_coefficient=5.0, giou_loss_coefficient=2.0, focal_alpha=0.25, disable_custom_kernels=False, # other parameters max_text_len=256, text_enhancer_dropout=0.0, fusion_droppath=0.1, fusion_dropout=0.0, embedding_init_target=True, query_dim=4, positional_embedding_temperature=20, init_std=0.02, layer_norm_eps=1e-5, **kwargs, ): if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") backbone_config = CONFIG_MAPPING["swin"]( window_size=7, image_size=224, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) if text_config is None: text_config = {} logger.info("text_config is None. 
Initializing the text config with default values (`BertConfig`).") self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type # deformable attributes self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.two_stage = two_stage # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha self.disable_custom_kernels = disable_custom_kernels # Text backbone if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "bert") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["bert"]() self.text_config = text_config self.max_text_len = max_text_len # Text Enhancer self.text_enhancer_dropout = text_enhancer_dropout # Fusion self.fusion_droppath = fusion_droppath self.fusion_dropout = fusion_dropout # Others self.embedding_init_target = embedding_init_target self.query_dim = query_dim self.positional_embedding_temperature = positional_embedding_temperature self.init_std = init_std self.layer_norm_eps = layer_norm_eps super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) __all__ = ["MMGroundingDinoConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
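As the `MMGroundingDinoConfig` constructor above shows, both the vision backbone and the text backbone can be supplied as plain dictionaries; a `model_type` key routes each dictionary through `CONFIG_MAPPING` to the matching config class. A short sketch with illustrative (not recommended) overrides:

```python
from transformers import MMGroundingDinoConfig

config = MMGroundingDinoConfig(
    backbone_config={"model_type": "swin", "out_indices": [2, 3, 4]},
    text_config={"model_type": "bert", "hidden_size": 768},
    num_queries=300,
)
print(config.backbone_config.model_type, config.text_config.model_type)  # swin bert
```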
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py
src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch from torch import nn from ... import initialization as init from ...configuration_utils import PreTrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING, AutoConfig from ..auto.modeling_auto import AutoModel from ..grounding_dino.modeling_grounding_dino import ( GroundingDinoContrastiveEmbedding, GroundingDinoConvEncoder, GroundingDinoConvModel, GroundingDinoDecoder, GroundingDinoEncoder, GroundingDinoForObjectDetection, GroundingDinoMLPPredictionHead, GroundingDinoModel, GroundingDinoPreTrainedModel, build_position_encoding, ) logger = logging.get_logger(__name__) class MMGroundingDinoConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`MMGroundingDinoModel`]. It is used to instantiate a MM Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MM Grounding DINO tiny architecture [openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det](https://huggingface.co/openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`): The config object or dictionary of the text backbone. num_queries (`int`, *optional*, defaults to 900): Number of object queries, i.e. detection slots. This is the maximal number of objects [`MMGroundingDinoModel`] can detect in a single image. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. 
encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as an encoder/decoder or not. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. num_feature_levels (`int`, *optional*, defaults to 4): The number of input feature levels. encoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the encoder. decoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the decoder. two_stage (`bool`, *optional*, defaults to `True`): Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of Grounding DINO, which are further fed into the decoder for iterative bounding box refinement. class_cost (`float`, *optional*, defaults to 1.0): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5.0): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. bbox_loss_coefficient (`float`, *optional*, defaults to 5.0): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. disable_custom_kernels (`bool`, *optional*, defaults to `False`): Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom kernels are not supported by PyTorch ONNX export. max_text_len (`int`, *optional*, defaults to 256): The maximum length of the text input. text_enhancer_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the text enhancer. fusion_droppath (`float`, *optional*, defaults to 0.1): The droppath ratio for the fusion module. 
fusion_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the fusion module. embedding_init_target (`bool`, *optional*, defaults to `True`): Whether to initialize the target with Embedding weights. query_dim (`int`, *optional*, defaults to 4): The dimension of the query vector. positional_embedding_temperature (`float`, *optional*, defaults to 20): The temperature for Sine Positional Embedding that is used together with vision backbone. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. Examples: ```python >>> from transformers import MMGroundingDinoConfig, MMGroundingDinoModel >>> # Initializing a MM Grounding DINO configuration >>> configuration = MMGroundingDinoConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = MMGroundingDinoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mm-grounding-dino" sub_configs = {"backbone_config": AutoConfig, "text_config": AutoConfig} attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=True, class_cost=1.0, bbox_cost=5.0, giou_cost=2.0, bbox_loss_coefficient=5.0, giou_loss_coefficient=2.0, focal_alpha=0.25, disable_custom_kernels=False, # other parameters max_text_len=256, text_enhancer_dropout=0.0, fusion_droppath=0.1, fusion_dropout=0.0, embedding_init_target=True, query_dim=4, positional_embedding_temperature=20, init_std=0.02, layer_norm_eps=1e-5, **kwargs, ): if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") backbone_config = CONFIG_MAPPING["swin"]( window_size=7, image_size=224, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) if text_config is None: text_config = {} logger.info("text_config is None. 
Initializing the text config with default values (`BertConfig`).") self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type # deformable attributes self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.two_stage = two_stage # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha self.disable_custom_kernels = disable_custom_kernels # Text backbone if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "bert") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["bert"]() self.text_config = text_config self.max_text_len = max_text_len # Text Enhancer self.text_enhancer_dropout = text_enhancer_dropout # Fusion self.fusion_droppath = fusion_droppath self.fusion_dropout = fusion_dropout # Others self.embedding_init_target = embedding_init_target self.query_dim = query_dim self.positional_embedding_temperature = positional_embedding_temperature self.init_std = init_std self.layer_norm_eps = layer_norm_eps super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) class MMGroundingDinoContrastiveEmbedding(GroundingDinoContrastiveEmbedding): def __init__(self, config): super().__init__(config) self.bias = nn.Parameter(torch.tensor(0.0)) def forward( self, vision_hidden_state: torch.FloatTensor, text_hidden_state: torch.FloatTensor, text_token_mask: torch.BoolTensor, ) -> torch.FloatTensor: res = vision_hidden_state @ text_hidden_state.transpose(-1, -2) res = res / math.sqrt(vision_hidden_state.shape[-1]) res = res + self.bias res.masked_fill_(~text_token_mask[:, None, :], float("-inf")) # padding to max_text_len new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device) new_res[..., : res.shape[-1]] = res return new_res class MMGroundingDinoPreTrainedModel(GroundingDinoPreTrainedModel): @torch.no_grad() def _init_weights(self, module): super()._init_weights(module) if isinstance(module, MMGroundingDinoContrastiveEmbedding): init.constant_(module.bias, -math.log((1 - 0.01) / 0.01)) class MMGroundingDinoConvEncoder(GroundingDinoConvEncoder): pass class MMGroundingDinoConvModel(GroundingDinoConvModel): pass class MMGroundingDinoEncoder(GroundingDinoEncoder): pass class MMGroundingDinoDecoder(GroundingDinoDecoder): pass class MMGroundingDinoModel(GroundingDinoModel, MMGroundingDinoPreTrainedModel): def __init__(self, config: MMGroundingDinoConfig): MMGroundingDinoPreTrainedModel.__init__(self, config) # Create backbone + positional encoding backbone = 
MMGroundingDinoConvEncoder(config) position_embeddings = build_position_encoding(config) self.backbone = MMGroundingDinoConvModel(backbone, position_embeddings) # Create input projection layers num_backbone_outs = len(backbone.intermediate_channel_sizes) input_proj_list = [] for i in range(num_backbone_outs): in_channels = backbone.intermediate_channel_sizes[i] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ) for _ in range(config.num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model), ) ) in_channels = config.d_model self.input_proj_vision = nn.ModuleList(input_proj_list) # Create text backbone self.text_backbone = AutoModel.from_config(config.text_config, add_pooling_layer=False) self.text_projection = nn.Linear(config.text_config.hidden_size, config.d_model) if config.embedding_init_target or not config.two_stage: self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = MMGroundingDinoEncoder(config) self.decoder = MMGroundingDinoDecoder(config) self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) self.enc_output = nn.Linear(config.d_model, config.d_model) self.enc_output_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps) self.encoder_output_bbox_embed = MMGroundingDinoMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) self.encoder_output_class_embed = MMGroundingDinoContrastiveEmbedding(config) self.post_init() class MMGroundingDinoMLPPredictionHead(GroundingDinoMLPPredictionHead): pass class MMGroundingDinoForObjectDetection(GroundingDinoForObjectDetection, MMGroundingDinoPreTrainedModel): _tied_weights_keys = { r"bbox_embed.(?![0])\d+": r"bbox_embed.0", r"class_embed.(?![0])\d+": r"^class_embed.0", "model.decoder.bbox_embed": "bbox_embed", "model.decoder.class_embed": "class_embed", } def __init__(self, config: MMGroundingDinoConfig): MMGroundingDinoPreTrainedModel.__init__(self, config) self.model = MMGroundingDinoModel(config) self.class_embed = nn.ModuleList( [MMGroundingDinoContrastiveEmbedding(config) for _ in range(config.decoder_layers)] ) self.bbox_embed = nn.ModuleList( [ MMGroundingDinoMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) for _ in range(config.decoder_layers) ] ) # Initialize weights and apply final processing self.model.decoder.class_embed = self.class_embed # class embed has no weights so nothing to tie self.model.decoder.bbox_embed = self.bbox_embed self.post_init() __all__ = [ "MMGroundingDinoConfig", "MMGroundingDinoForObjectDetection", "MMGroundingDinoModel", "MMGroundingDinoPreTrainedModel", ]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
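The modular definition above is easiest to read next to a concrete instantiation. Below is a small sketch (not part of the repository) that mirrors the "tiny" variant assembled by the conversion script further down: an explicit Swin-T backbone config, four feature levels, and a randomly initialized `MMGroundingDinoForObjectDetection`. The default `BertConfig` text backbone is used implicitly; the parameter-count print is just for illustration.

```python
from transformers import SwinConfig
from transformers.models.mm_grounding_dino import (
    MMGroundingDinoConfig,
    MMGroundingDinoForObjectDetection,
)

# Swin-T settings matching the "tiny" branch of get_mm_grounding_dino_config below.
backbone_config = SwinConfig(
    image_size=224,
    window_size=7,
    embed_dim=96,
    depths=(2, 2, 6, 2),
    num_heads=(3, 6, 12, 24),
    out_features=["stage2", "stage3", "stage4"],
)

config = MMGroundingDinoConfig(backbone_config=backbone_config, num_feature_levels=4)

# Randomly initialized weights; real weights come from a converted checkpoint.
model = MMGroundingDinoForObjectDetection(config).eval()
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")
```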
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mm_grounding_dino/convert_mm_grounding_dino_to_hf.py
src/transformers/models/mm_grounding_dino/convert_mm_grounding_dino_to_hf.py
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import re import requests import torch from PIL import Image from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.grounding_dino.image_processing_grounding_dino import GroundingDinoImageProcessor from transformers.models.grounding_dino.processing_grounding_dino import GroundingDinoProcessor from transformers.models.mm_grounding_dino.configuration_mm_grounding_dino import MMGroundingDinoConfig from transformers.models.mm_grounding_dino.modeling_mm_grounding_dino import MMGroundingDinoForObjectDetection from transformers.models.swin.configuration_swin import SwinConfig MODEL_NAME_TO_CHECKPOINT_URL_MAPPING = { "mm_grounding_dino_tiny_o365v1_goldg": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg/grounding_dino_swin-t_pretrain_obj365_goldg_20231122_132602-4ea751ce.pth", "mm_grounding_dino_tiny_o365v1_goldg_grit": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_20231128_200818-169cc352.pth", "mm_grounding_dino_tiny_o365v1_goldg_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_v3det_20231218_095741-e316e297.pth", "mm_grounding_dino_tiny_o365v1_goldg_grit_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth", "mm_grounding_dino_base_o365v1_goldg_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det/grounding_dino_swin-b_pretrain_obj365_goldg_v3de-f83eef00.pth", "mm_grounding_dino_base_all": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_all/grounding_dino_swin-b_pretrain_all-f9818a7c.pth", "mm_grounding_dino_large_o365v2_oiv6_goldg": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg/grounding_dino_swin-l_pretrain_obj365_goldg-34dcdc53.pth", "mm_grounding_dino_large_all": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_all/grounding_dino_swin-l_pretrain_all-56d69e78.pth", "llmdet_tiny": "https://huggingface.co/fushh7/LLMDet/resolve/main/tiny.pth?download=true", "llmdet_base": "https://huggingface.co/fushh7/LLMDet/resolve/main/base.pth?download=true", "llmdet_large": "https://huggingface.co/fushh7/LLMDet/resolve/main/large.pth?download=true", } MODEL_NAME_TO_EXPECTED_OUTPUT_MAPPING = { "mm_grounding_dino_tiny_o365v1_goldg": { "scores": torch.tensor([0.7722, 0.7584, 0.7984, 0.7163]), "boxes": torch.tensor( [ [0.5212, 0.1594, 0.5792, 0.3895], [0.5424, 0.0513, 0.9996, 
0.7757], [0.0629, 0.1526, 0.2746, 0.2447], [0.0091, 0.1127, 0.4945, 0.9911], ] ), }, "mm_grounding_dino_tiny_o365v1_goldg_grit": { "scores": torch.tensor([0.7865, 0.7180, 0.7665, 0.8177]), "boxes": torch.tensor( [ [0.0084, 0.1129, 0.4940, 0.9895], [0.5214, 0.1597, 0.5786, 0.3875], [0.5413, 0.0507, 0.9998, 0.7768], [0.0631, 0.1527, 0.2740, 0.2449], ] ), }, "mm_grounding_dino_tiny_o365v1_goldg_v3det": { "scores": torch.tensor([0.5690, 0.5553, 0.6075, 0.5775]), "boxes": torch.tensor( [ [0.5393, 0.0502, 0.9989, 0.7763], [0.0090, 0.1125, 0.4950, 0.9895], [0.5207, 0.1589, 0.5794, 0.3889], [0.0625, 0.1519, 0.2750, 0.2446], ] ), }, "mm_grounding_dino_tiny_o365v1_goldg_grit_v3det": { "scores": torch.tensor([0.8381, 0.8204, 0.7970, 0.7175]), "boxes": torch.tensor( [ [0.0099, 0.1129, 0.4942, 0.9903], [0.5413, 0.0506, 0.9998, 0.7753], [0.0626, 0.1527, 0.2744, 0.2443], [0.5211, 0.1596, 0.5790, 0.3890], ] ), }, "mm_grounding_dino_base_o365v1_goldg_v3det": { "scores": torch.tensor([0.8418, 0.8364, 0.8342, 0.7885]), "boxes": torch.tensor( [ [0.5427, 0.0502, 0.9996, 0.7770], [0.0628, 0.1529, 0.2747, 0.2448], [0.0085, 0.1132, 0.4947, 0.9898], [0.5208, 0.1597, 0.5787, 0.3910], ] ), }, "mm_grounding_dino_base_all": { "scores": torch.tensor([0.4713]), "boxes": torch.tensor([[0.5423, 0.0507, 0.9998, 0.7761]]), }, "mm_grounding_dino_large_o365v2_oiv6_goldg": { "scores": torch.tensor([0.7824, 0.8275, 0.7715, 0.8211]), "boxes": torch.tensor( [ [0.0082, 0.1133, 0.4945, 0.9889], [0.5410, 0.0508, 0.9998, 0.7771], [0.0632, 0.1526, 0.2740, 0.2439], [0.5205, 0.1599, 0.5787, 0.3906], ] ), }, "mm_grounding_dino_large_all": { "scores": torch.tensor([0.7373, 0.6208, 0.6913, 0.4523]), "boxes": torch.tensor( [ [0.5424, 0.0509, 0.9997, 0.7765], [0.0632, 0.1529, 0.2744, 0.2447], [0.0121, 0.1125, 0.4947, 0.9884], [0.5206, 0.1597, 0.5789, 0.3933], ] ), }, "llmdet_tiny": { "scores": torch.tensor([0.7262, 0.7552, 0.7656, 0.8207]), "boxes": torch.tensor( [ [0.0114, 0.1132, 0.4947, 0.9854], [0.5387, 0.0513, 0.9992, 0.7765], [0.5212, 0.1605, 0.5788, 0.3890], [0.0634, 0.1536, 0.2743, 0.2440], ] ), }, "llmdet_base": { "scores": torch.tensor([0.8646, 0.7567, 0.6978, 0.8084]), "boxes": torch.tensor( [ [0.0632, 0.1529, 0.2745, 0.2438], [0.5420, 0.0512, 0.9989, 0.7774], [0.0110, 0.1134, 0.4950, 0.9875], [0.5209, 0.1602, 0.5789, 0.3908], ] ), }, "llmdet_large": { "scores": torch.tensor([0.7107, 0.8626, 0.7458, 0.8166]), "boxes": torch.tensor( [ [0.0147, 0.1128, 0.4957, 0.9858], [0.0634, 0.1528, 0.2744, 0.2447], [0.5414, 0.0511, 0.9997, 0.7776], [0.5209, 0.1602, 0.5792, 0.3916], ] ), }, } # fmt: off ORIGINAL_TO_CONVERTED_KEY_MAPPING = { # vision backbone r"backbone.patch_embed.projection.(weight|bias)": r"model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.\1", r"backbone.patch_embed.norm.(weight|bias)": r"model.backbone.conv_encoder.model.embeddings.norm.\1", r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.(relative_position_bias_table|relative_position_index)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.self.\3", r"backbone.stages.(\d+).blocks.(\d+).norm1.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.layernorm_before.\3", r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.(query|key|value).(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.self.\3.\4", r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.proj.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.output.dense.\3", 
r"backbone.stages.(\d+).blocks.(\d+).norm2.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.layernorm_after.\3", r"backbone.stages.(\d+).blocks.(\d+).ffn.layers.0.0.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.intermediate.dense.\3", r"backbone.stages.(\d+).blocks.(\d+).ffn.layers.1.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.output.dense.\3", r"backbone.stages.(\d+).downsample.reduction.weight": r"model.backbone.conv_encoder.model.encoder.layers.\1.downsample.reduction.weight", r"backbone.stages.(\d+).downsample.norm.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.downsample.norm.\2", r"backbone.norms.(\d+).(weight|bias)": r"model.backbone.conv_encoder.model.hidden_states_norms.stage\1.\2", r"neck.convs.(\d+).conv.(weight|bias)": r"model.input_proj_vision.\1.0.\2", r"neck.convs.(\d+).gn.(weight|bias)": r"model.input_proj_vision.\1.1.\2", r"neck.extra_convs.(\d+).conv.(weight|bias)": r"model.input_proj_vision.\1.0.\2", r"neck.extra_convs.(\d+).gn.(weight|bias)": r"model.input_proj_vision.\1.1.\2", # text backbone r"language_model.language_backbone.body.model.(.*)": r"model.text_backbone.\1", r"text_feat_map.(weight|bias)": r"model.text_projection.\1", # encoder r"encoder.fusion_layers.(\d+).gamma_v": r"model.encoder.layers.\1.fusion_layer.vision_param", r"encoder.fusion_layers.(\d+).gamma_l": r"model.encoder.layers.\1.fusion_layer.text_param", r"encoder.fusion_layers.(\d+).layer_norm_v.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.layer_norm_vision.\2", r"encoder.fusion_layers.(\d+).attn.v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.vision_proj.\2", r"encoder.fusion_layers.(\d+).attn.values_v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.values_vision_proj.\2", r"encoder.fusion_layers.(\d+).attn.out_v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.out_vision_proj.\2", r"encoder.fusion_layers.(\d+).layer_norm_l.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.layer_norm_text.\2", r"encoder.fusion_layers.(\d+).attn.l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.text_proj.\2", r"encoder.fusion_layers.(\d+).attn.values_l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.values_text_proj.\2", r"encoder.fusion_layers.(\d+).attn.out_l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.out_text_proj.\2", r"encoder.layers.(\d+).self_attn.(sampling_offsets|attention_weights|value_proj|output_proj).(weight|bias)": r"model.encoder.layers.\1.deformable_layer.self_attn.\2.\3", r"encoder.layers.(\d+).norms.0.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.self_attn_layer_norm.\2", r"encoder.layers.(\d+).ffn.layers.0.0.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.fc1.\2", r"encoder.layers.(\d+).ffn.layers.1.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.fc2.\2", r"encoder.layers.(\d+).norms.1.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.final_layer_norm.\2", r"encoder.text_layers.(\d+).self_attn.attn.(query|key|value)_proj_(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.self_attn.\2.\3", r"encoder.text_layers.(\d+).self_attn.attn.out_proj.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.self_attn.out_proj.\2", r"encoder.text_layers.(\d+).norms.0.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.layer_norm_before.\2", r"encoder.text_layers.(\d+).ffn.layers.0.0.(weight|bias)": 
r"model.encoder.layers.\1.text_enhancer_layer.fc1.\2", r"encoder.text_layers.(\d+).ffn.layers.1.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.fc2.\2", r"encoder.text_layers.(\d+).norms.1.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.layer_norm_after.\2", r"encoder.bbox_head.cls_branch.bias": r"model.encoder_output_class_embed.bias", r"encoder.bbox_head.reg_branch.0.(weight|bias)": r"model.encoder_output_bbox_embed.layers.0.\1", r"encoder.bbox_head.reg_branch.2.(weight|bias)": r"model.encoder_output_bbox_embed.layers.1.\1", r"encoder.bbox_head.reg_branch.4.(weight|bias)": r"model.encoder_output_bbox_embed.layers.2.\1", # decoder r"decoder.norm.(weight|bias)": r"model.decoder.layer_norm.\1", r"decoder.ref_point_head.layers.(\d+).(weight|bias)": r"model.decoder.reference_points_head.layers.\1.\2", r"decoder.layers.(\d+).self_attn.attn.(query|key|value)_proj_(weight|bias)": r"model.decoder.layers.\1.self_attn.\2.\3", r"decoder.layers.(\d+).self_attn.attn.out_proj.(weight|bias)": r"model.decoder.layers.\1.self_attn.out_proj.\2", r"decoder.layers.(\d+).norms.0.(weight|bias)": r"model.decoder.layers.\1.self_attn_layer_norm.\2", r"decoder.layers.(\d+).cross_attn_text.attn.(query|key|value)_proj_(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text.\2.\3", r"decoder.layers.(\d+).cross_attn_text.attn.out_proj.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text.out_proj.\2", r"decoder.layers.(\d+).norms.1.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text_layer_norm.\2", r"decoder.layers.(\d+).cross_attn.(sampling_offsets|attention_weights|value_proj|output_proj).(weight|bias)": r"model.decoder.layers.\1.encoder_attn.\2.\3", r"decoder.layers.(\d+).norms.2.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_layer_norm.\2", r"decoder.layers.(\d+).ffn.layers.0.0.(weight|bias)": r"model.decoder.layers.\1.fc1.\2", r"decoder.layers.(\d+).ffn.layers.1.(weight|bias)": r"model.decoder.layers.\1.fc2.\2", r"decoder.layers.(\d+).norms.3.(weight|bias)": r"model.decoder.layers.\1.final_layer_norm.\2", r"decoder.bbox_head.cls_branches.(\d+).bias": r"model.decoder.class_embed.\1.bias", r"decoder.bbox_head.reg_branches.(\d+).0.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.0.\2", r"decoder.bbox_head.reg_branches.(\d+).2.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.1.\2", r"decoder.bbox_head.reg_branches.(\d+).4.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.2.\2", # other r"level_embed": r"model.level_embed", r"query_embedding.weight": r"model.query_position_embeddings.weight", r"memory_trans_fc.(weight|bias)": r"model.enc_output.\1", r"memory_trans_norm.(weight|bias)": r"model.enc_output_norm.\1", r"bbox_head.cls_branches.(\d+).bias": r"class_embed.\1.bias", r"bbox_head.reg_branches.(\d+).0.(weight|bias)": r"bbox_embed.\1.layers.0.\2", r"bbox_head.reg_branches.(\d+).2.(weight|bias)": r"bbox_embed.\1.layers.1.\2", r"bbox_head.reg_branches.(\d+).4.(weight|bias)": r"bbox_embed.\1.layers.2.\2", } # fmt: on def get_mm_grounding_dino_config(model_name: str) -> MMGroundingDinoConfig: if "tiny" in model_name: swin_image_size = 224 swin_window_size = 7 swin_embed_dim = 96 swin_depths = (2, 2, 6, 2) swin_num_heads = (3, 6, 12, 24) swin_out_features = ["stage2", "stage3", "stage4"] num_feature_levels = 4 elif "base" in model_name: swin_image_size = 384 swin_window_size = 12 swin_embed_dim = 128 swin_depths = (2, 2, 18, 2) swin_num_heads = (4, 8, 16, 32) swin_out_features = ["stage2", "stage3", "stage4"] num_feature_levels = 4 elif "large" in 
model_name: swin_image_size = 384 swin_window_size = 12 swin_embed_dim = 192 swin_depths = (2, 2, 18, 2) swin_num_heads = (6, 12, 24, 48) swin_out_features = ["stage1", "stage2", "stage3", "stage4"] num_feature_levels = 5 else: raise ValueError( f"Model name: {model_name} is not supported. Only `tiny`, `base` and `large` models are currently supported." ) backbone_config = SwinConfig( image_size=swin_image_size, window_size=swin_window_size, embed_dim=swin_embed_dim, depths=swin_depths, num_heads=swin_num_heads, out_features=swin_out_features, ) model_config = MMGroundingDinoConfig( backbone_config=backbone_config, num_feature_levels=num_feature_levels, ) return model_config def get_mm_grounding_dino_processor() -> GroundingDinoProcessor: img_processor = GroundingDinoImageProcessor() txt_processor = BertTokenizer.from_pretrained("bert-base-uncased") processor = GroundingDinoProcessor(img_processor, txt_processor) return processor # Copied from: https://github.com/iSEE-Laboratory/LLMDet/blob/96ec8c82a9d97b170db759e043afd5b81445d0f1/hf_model/mmdet2groundingdino_swint.py#L8C1-L13C13 def correct_unfold_reduction_order(x: torch.Tensor) -> torch.Tensor: out_channel, in_channel = x.shape x = x.reshape(out_channel, in_channel // 4, 4).transpose(1, 2) x = x[:, [0, 2, 1, 3], :] x = x.reshape(out_channel, in_channel) return x # Copied from: https://github.com/iSEE-Laboratory/LLMDet/blob/96ec8c82a9d97b170db759e043afd5b81445d0f1/hf_model/mmdet2groundingdino_swint.py#L15C1-L20C13 def correct_unfold_norm_order(x: torch.Tensor) -> torch.Tensor: in_channel = x.shape[0] x = x.reshape(in_channel // 4, 4).transpose(0, 1) x = x[[0, 2, 1, 3], :] x = x.reshape(in_channel) return x def preprocess_old_state(state_dict: dict, config: MMGroundingDinoConfig) -> dict: """ Preprocesses old state dict to enable 1-1 mapping: - split qkv projections in Swin backbone - reorder reduction and norm parameters in Swin backbone - shift output norm indices in Swin backbone - shift output proj indices in neck - split q,k,v projections in text self and cross attentions in encoder and decoder - duplicate detection head parameters for decoder and encoder """ new_state_dict = state_dict.copy() for k in state_dict: if k.startswith("backbone"): if "downsample.reduction" in k: new_state_dict[k] = correct_unfold_reduction_order(new_state_dict.pop(k)) elif "downsample.norm" in k: new_state_dict[k] = correct_unfold_norm_order(new_state_dict.pop(k)) elif "w_msa.qkv" in k: q_param, k_param, v_param = new_state_dict.pop(k).chunk(3) new_state_dict[k.replace("qkv", "query")] = q_param new_state_dict[k.replace("qkv", "key")] = k_param new_state_dict[k.replace("qkv", "value")] = v_param elif "backbone.norm" in k: match = re.match(r"backbone.norm(\d+).(weight|bias)", k) new_state_dict[f"backbone.norms.{int(match.group(1)) + 1}.{match.group(2)}"] = new_state_dict.pop(k) elif k.startswith("neck.extra_convs"): num_normal_convs = len(config.backbone_config.out_indices) if "gn" in k: match = re.match(r"neck.extra_convs.(\d+).gn.(weight|bias)", k) new_state_dict[f"neck.extra_convs.{num_normal_convs + int(match.group(1))}.gn.{match.group(2)}"] = ( new_state_dict.pop(k) ) elif "conv" in k: match = re.match(r"neck.extra_convs.(\d+).conv.(weight|bias)", k) new_state_dict[f"neck.extra_convs.{num_normal_convs + int(match.group(1))}.conv.{match.group(2)}"] = ( new_state_dict.pop(k) ) elif k.startswith("encoder"): if "self_attn.attn.in_proj" in k: q_param, k_param, v_param = new_state_dict.pop(k).chunk(3) new_state_dict[k.replace("in", "query")] = q_param 
new_state_dict[k.replace("in", "key")] = k_param new_state_dict[k.replace("in", "value")] = v_param elif k.startswith("decoder"): if "self_attn.attn.in_proj" in k or "cross_attn_text.attn.in_proj" in k: q_param, k_param, v_param = new_state_dict.pop(k).chunk(3) new_state_dict[k.replace("in", "query")] = q_param new_state_dict[k.replace("in", "key")] = k_param new_state_dict[k.replace("in", "value")] = v_param elif k.startswith("bbox_head"): num_decoder_layers = config.decoder_layers match = re.match(r"bbox_head.(cls|reg)_branches.(\d+).(.*)", k) cls_or_reg = match.group(1) layer_idx = int(match.group(2)) suffix = match.group(3) if layer_idx < num_decoder_layers: new_key = f"decoder.bbox_head.{cls_or_reg}_branches.{layer_idx}.{suffix}" new_state_dict[new_key] = new_state_dict[k] # copy else: new_key = f"encoder.bbox_head.{cls_or_reg}_branch.{suffix}" new_state_dict[new_key] = new_state_dict.pop(k) # move # remove unused params if ( k == "dn_query_generator.label_embedding.weight" or k == "language_model.language_backbone.body.model.embeddings.position_ids" or k == "image_seperate.weight" or k.startswith("lmm") or k.startswith("connector") or k.startswith("region_connector") or k.startswith("ref_point_head") ): new_state_dict.pop(k) return new_state_dict # Copied from transformers/models/siglip2/convert_siglip2_to_hf.py def convert_old_keys_to_new_keys(state_dict_keys: list) -> dict: """ This function should be applied only once, on the concatenated keys to efficiently rename using the key mappings. """ output_dict = {} if state_dict_keys is not None: old_text = "\n".join(state_dict_keys) new_text = old_text for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): if replacement is None: new_text = re.sub(pattern, "", new_text) # an empty line continue new_text = re.sub(pattern, replacement, new_text) output_dict = dict(zip(old_text.split("\n"), new_text.split("\n"))) return output_dict def convert_mm_to_hf_state(original_state: dict, hf_cfg: MMGroundingDinoConfig) -> dict: original_state = preprocess_old_state(original_state, hf_cfg) original_state_keys = list(original_state.keys()) original_to_hf_key_map = convert_old_keys_to_new_keys(original_state_keys) hf_state = {} for original_key in original_state_keys: hf_key = original_to_hf_key_map[original_key] hf_state[hf_key] = original_state.pop(original_key) return hf_state def prepare_test_inputs(): image_url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(image_url, stream=True).raw) text = [["cat", "remote"]] return image, text @torch.no_grad() def convert_mm_grounding_dino_checkpoint( model_name: str, verify_outputs: bool, push_to_hub: bool, hub_user_name: str, ) -> tuple[MMGroundingDinoConfig, dict]: # Load original state checkpoint_url = MODEL_NAME_TO_CHECKPOINT_URL_MAPPING[model_name] print(f"Loading checkpoint from: {checkpoint_url}") ckpt = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") mm_state = ckpt["state_dict"] # Create hf model and processor print("Creating model...") hf_cfg = get_mm_grounding_dino_config(model_name) hf_state = convert_mm_to_hf_state(mm_state, hf_cfg) hf_model = MMGroundingDinoForObjectDetection(hf_cfg).eval() hf_model.load_state_dict(hf_state) hf_processor = get_mm_grounding_dino_processor() # Verify outputs if needed if verify_outputs: print("Running inference to verify outputs...") image, text = prepare_test_inputs() model_inputs = hf_processor(images=image, text=text, return_tensors="pt") model_outputs = 
hf_model(**model_inputs) results = hf_processor.post_process_grounded_object_detection( model_outputs, model_inputs.input_ids, box_threshold=0.4, text_threshold=0.3, ) result = results[0] print(result) expected = MODEL_NAME_TO_EXPECTED_OUTPUT_MAPPING[model_name] for key in expected: torch.testing.assert_close(result[key], expected[key], atol=1e-3, rtol=1e-3) print("Outputs match.") # Push to hub if needed if push_to_hub: print("Pushing to hub...") hub_url = f"{hub_user_name}/{model_name}" hf_model.push_to_hub(hub_url) hf_processor.push_to_hub(hub_url) print(f"Pushed to huggingface hub at: {hub_url}.") return hf_cfg, hf_state def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model-name", required=True, type=str, choices=list(MODEL_NAME_TO_CHECKPOINT_URL_MAPPING.keys()), help="URL to the original mm grounding dino checkpoint.", ) parser.add_argument("--hub-user-name", type=str, help="User name on the huggingface hub.") parser.add_argument("--push-to-hub", action="store_true", help="Whether to push model to hub or not.") parser.add_argument( "--verify-outputs", action="store_true", help="Whether to verify that model output is correct or not." ) return parser.parse_args() if __name__ == "__main__": args = parse_args() convert_mm_grounding_dino_checkpoint( args.model_name, args.verify_outputs, args.push_to_hub, args.hub_user_name, )
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
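One detail of the conversion script worth spelling out is `convert_old_keys_to_new_keys`: all original checkpoint keys are joined into a single newline-separated string so that every regex in `ORIGINAL_TO_CONVERTED_KEY_MAPPING` is applied exactly once, and the result is split back to recover a one-to-one old-to-new mapping. A self-contained sketch of that technique follows; the two patterns are copied from the table above, while `KEY_MAPPING`, `rename_keys`, and the sample keys are purely illustrative.

```python
import re

# Toy subset of ORIGINAL_TO_CONVERTED_KEY_MAPPING (illustrative only).
KEY_MAPPING = {
    r"text_feat_map.(weight|bias)": r"model.text_projection.\1",
    r"neck.convs.(\d+).conv.(weight|bias)": r"model.input_proj_vision.\1.0.\2",
}


def rename_keys(old_keys: list[str]) -> dict[str, str]:
    # Join all keys so each regex is applied once over the whole set,
    # then zip old and new lines back into a 1-to-1 mapping.
    old_text = "\n".join(old_keys)
    new_text = old_text
    for pattern, replacement in KEY_MAPPING.items():
        new_text = re.sub(pattern, replacement, new_text)
    return dict(zip(old_keys, new_text.split("\n")))


mapping = rename_keys(["text_feat_map.weight", "neck.convs.2.conv.bias"])
print(mapping)
# {'text_feat_map.weight': 'model.text_projection.weight',
#  'neck.convs.2.conv.bias': 'model.input_proj_vision.2.0.bias'}
```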
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mm_grounding_dino/__init__.py
src/transformers/models/mm_grounding_dino/__init__.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_mm_grounding_dino import * from .modeling_mm_grounding_dino import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
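The `__init__.py` above follows the library's usual lazy-module pattern: the star imports are only visible to static type checkers, while at runtime the module object is swapped for a `_LazyModule` that loads each submodule the first time one of its attributes is requested. A minimal usage sketch, assuming the classes are exported as listed in `__all__` above:

```python
import transformers.models.mm_grounding_dino as mmgd

# The package import itself stays cheap; the configuration submodule is only
# resolved when MMGroundingDinoConfig is first accessed through the lazy module.
config = mmgd.MMGroundingDinoConfig()
print(config.model_type)  # "mm-grounding-dino"
```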
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py
src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_mm_grounding_dino.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import warnings from dataclasses import dataclass from typing import Optional, Union import torch import torch.nn.functional as F from torch import Tensor, nn from ... import initialization as init from ...activations import ACT2FN from ...file_utils import ModelOutput, is_timm_available, requires_backends from ...integrations import use_kernel_forward_from_hub from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import auto_docstring from ...utils.backbone_utils import load_backbone from ..auto.modeling_auto import AutoModel from .configuration_mm_grounding_dino import MMGroundingDinoConfig if is_timm_available(): from timm import create_model class MMGroundingDinoContrastiveEmbedding(nn.Module): def __init__(self, config): super().__init__() self.max_text_len = config.max_text_len self.bias = nn.Parameter(torch.tensor(0.0)) def forward( self, vision_hidden_state: torch.FloatTensor, text_hidden_state: torch.FloatTensor, text_token_mask: torch.BoolTensor, ) -> torch.FloatTensor: res = vision_hidden_state @ text_hidden_state.transpose(-1, -2) res = res / math.sqrt(vision_hidden_state.shape[-1]) res = res + self.bias res.masked_fill_(~text_token_mask[:, None, :], float("-inf")) # padding to max_text_len new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device) new_res[..., : res.shape[-1]] = res return new_res @use_kernel_forward_from_hub("MultiScaleDeformableAttention") class MultiScaleDeformableAttention(nn.Module): def forward( self, value: Tensor, value_spatial_shapes: Tensor, value_spatial_shapes_list: list[tuple], level_start_index: Tensor, sampling_locations: Tensor, attention_weights: Tensor, im2col_step: int, ): batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height * width for height, width in value_spatial_shapes_list], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes_list): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id] .flatten(2) .transpose(1, 2) .reshape(batch_size * num_heads, hidden_dim, height, width) ) # batch_size, num_queries, num_heads, 
num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False, ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) return output.transpose(1, 2).contiguous() class MMGroundingDinoLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, config): super().__init__() embedding_dim = config.d_model // 2 self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos class MMGroundingDinoMultiscaleDeformableAttention(nn.Module): """ Multiscale deformable attention as proposed in Deformable DETR. """ def __init__(self, config: MMGroundingDinoConfig, num_heads: int, n_points: int): super().__init__() self.attn = MultiScaleDeformableAttention() if config.d_model % num_heads != 0: raise ValueError( f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}" ) dim_per_head = config.d_model // num_heads # check if dim_per_head is power of 2 if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): warnings.warn( "You'd better set embed_dim (d_model) in MMGroundingDinoMultiscaleDeformableAttention to make the" " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" " implementation." 
) self.im2col_step = 64 self.d_model = config.d_model self.n_levels = config.num_feature_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2) self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points) self.value_proj = nn.Linear(config.d_model, config.d_model) self.output_proj = nn.Linear(config.d_model, config.d_model) self.disable_custom_kernels = config.disable_custom_kernels def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape # Ignore copy if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = F.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, n_points, 2 num_coordinates = reference_points.shape[-1] if num_coordinates == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif num_coordinates == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") output = self.attn( value, spatial_shapes, spatial_shapes_list, level_start_index, sampling_locations, attention_weights, self.im2col_step, ) output = self.output_proj(output) return output, attention_weights class MMGroundingDinoBiMultiHeadAttention(nn.Module): def __init__(self, config): super().__init__() vision_dim = text_dim = config.d_model embed_dim = config.encoder_ffn_dim // 2 num_heads = config.encoder_attention_heads // 2 dropout = config.fusion_dropout self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.vision_dim = vision_dim self.text_dim = text_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by `num_heads` (got 
`embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." ) self.scale = self.head_dim ** (-0.5) self.dropout = dropout self.vision_proj = nn.Linear(self.vision_dim, self.embed_dim) self.text_proj = nn.Linear(self.text_dim, self.embed_dim) self.values_vision_proj = nn.Linear(self.vision_dim, self.embed_dim) self.values_text_proj = nn.Linear(self.text_dim, self.embed_dim) self.out_vision_proj = nn.Linear(self.embed_dim, self.vision_dim) self.out_text_proj = nn.Linear(self.embed_dim, self.text_dim) def _reshape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, vision_features: torch.FloatTensor, text_features: torch.FloatTensor, vision_attention_mask: Optional[torch.BoolTensor] = None, text_attention_mask: Optional[torch.BoolTensor] = None, ) -> tuple[tuple[torch.FloatTensor, torch.FloatTensor], tuple[torch.FloatTensor, torch.FloatTensor]]: """Image-to-text and text-to-image cross-attention Args: vision_features (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_dim)`): Projected flattened image features generated by the vision backbone. text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`): Projected text features generated by the text encoder. vision_attention_mask (`torch.BoolTensor`, **optional**): Attention mask for image-to-text cross-attention. False for real tokens and True for padding tokens. text_attention_mask (`torch.BoolTensor`, **optional**): Attention mask for text-to-image cross-attention. False for real tokens and True for padding tokens. Returns: `tuple(tuple(torch.FloatTensor), tuple(torch.FloatTensor))` where each inner tuple comprises an attention output and weights: - **vision_attn_output** (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_din)`) -- Output of the image-to-text cross-attention layer. - **vision_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, vision_sequence_length, vision_sequence_length)`) -- Attention weights of the image-to-text cross-attention layer. - **text_attn_output** (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`) -- Output of the text-to-image cross-attention layer. - **text_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, text_sequence_length, text_sequence_length)`) -- Attention weights of the text-to-image cross-attention layer. 
""" batch_size, tgt_len, _ = vision_features.size() vision_query_states = self.vision_proj(vision_features) * self.scale vision_query_states = self._reshape(vision_query_states, tgt_len, batch_size) text_key_states = self.text_proj(text_features) text_key_states = self._reshape(text_key_states, -1, batch_size) vision_value_states = self.values_vision_proj(vision_features) vision_value_states = self._reshape(vision_value_states, -1, batch_size) text_value_states = self.values_text_proj(text_features) text_value_states = self._reshape(text_value_states, -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) vision_query_states = vision_query_states.view(*proj_shape) text_key_states = text_key_states.view(*proj_shape) vision_value_states = vision_value_states.view(*proj_shape) text_value_states = text_value_states.view(*proj_shape) src_len = text_key_states.size(1) attn_weights = torch.bmm(vision_query_states, text_key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt if attn_weights.size() != (batch_size * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" ) attn_weights = attn_weights - attn_weights.max() # Do not increase -50000/50000, data type half has quite limited range attn_weights = torch.clamp(attn_weights, min=-50000, max=50000) attn_weights_transposed = attn_weights.transpose(1, 2) text_attn_weights = attn_weights_transposed - torch.max(attn_weights_transposed, dim=-1, keepdim=True)[0] # Do not increase -50000/50000, data type half has quite limited range text_attn_weights = torch.clamp(text_attn_weights, min=-50000, max=50000) # mask vision for language if vision_attention_mask is not None: vision_attention_mask = ( vision_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) ) text_attn_weights.masked_fill_(vision_attention_mask, float("-inf")) text_attn_weights = text_attn_weights.softmax(dim=-1) # mask language for vision if text_attention_mask is not None: text_attention_mask = text_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) attn_weights.masked_fill_(text_attention_mask, float("-inf")) vision_attn_weights = attn_weights.softmax(dim=-1) vision_attn_probs = F.dropout(vision_attn_weights, p=self.dropout, training=self.training) text_attn_probs = F.dropout(text_attn_weights, p=self.dropout, training=self.training) vision_attn_output = torch.bmm(vision_attn_probs, text_value_states) text_attn_output = torch.bmm(text_attn_probs, vision_value_states) if vision_attn_output.size() != (batch_size * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`vision_attn_output` should be of size {(batch_size, self.num_heads, tgt_len, self.head_dim)}, but is {vision_attn_output.size()}" ) if text_attn_output.size() != (batch_size * self.num_heads, src_len, self.head_dim): raise ValueError( f"`text_attn_output` should be of size {(batch_size, self.num_heads, src_len, self.head_dim)}, but is {text_attn_output.size()}" ) vision_attn_output = vision_attn_output.view(batch_size, self.num_heads, tgt_len, self.head_dim) vision_attn_output = vision_attn_output.transpose(1, 2) vision_attn_output = vision_attn_output.reshape(batch_size, tgt_len, self.embed_dim) text_attn_output = text_attn_output.view(batch_size, self.num_heads, src_len, self.head_dim) text_attn_output = text_attn_output.transpose(1, 2) text_attn_output = text_attn_output.reshape(batch_size, src_len, self.embed_dim) 
vision_attn_output = self.out_vision_proj(vision_attn_output) text_attn_output = self.out_text_proj(text_attn_output) return (vision_attn_output, vision_attn_weights), (text_attn_output, text_attn_weights) def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output class MMGroundingDinoDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class MMGroundingDinoFusionLayer(nn.Module): def __init__(self, config): super().__init__() drop_path = config.fusion_droppath # pre layer norm self.layer_norm_vision = nn.LayerNorm(config.d_model, config.layer_norm_eps) self.layer_norm_text = nn.LayerNorm(config.d_model, config.layer_norm_eps) self.attn = MMGroundingDinoBiMultiHeadAttention(config) # add layer scale for training stability self.drop_path = MMGroundingDinoDropPath(drop_path) if drop_path > 0.0 else nn.Identity() init_values = 1e-4 self.vision_param = nn.Parameter(init_values * torch.ones(config.d_model), requires_grad=True) self.text_param = nn.Parameter(init_values * torch.ones(config.d_model), requires_grad=True) def forward( self, vision_features: torch.FloatTensor, text_features: torch.FloatTensor, attention_mask_vision: Optional[torch.BoolTensor] = None, attention_mask_text: Optional[torch.BoolTensor] = None, ) -> tuple[tuple[torch.FloatTensor, torch.FloatTensor], tuple[torch.FloatTensor, torch.FloatTensor]]: """Image and text features fusion Args: vision_features (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_dim)`): Projected flattened image features generated by the vision backbone. text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`): Projected text features generated by the text encoder. attention_mask_vision (`torch.BoolTensor`, **optional**): Attention mask for image-to-text cross-attention. False for real tokens and True for padding tokens. attention_mask_text (`torch.BoolTensor`, **optional**): Attention mask for text-to-image cross-attention. False for real tokens and True for padding tokens. Returns: `tuple(tuple(torch.FloatTensor), tuple(torch.FloatTensor))` where each inner tuple comprises an enhanced feature and attention output and weights: - **vision_features** (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, vision_dim)`) -- Updated vision features with attention output from image-to-text cross-attention layer. - **vision_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, vision_sequence_length, vision_sequence_length)`) -- Attention weights of the image-to-text cross-attention layer. 
- **text_features** (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, text_dim)`) -- Updated text features with attention output from text-to-image cross-attention layer. - **text_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, text_sequence_length, text_sequence_length)`) -- Attention weights of the text-to-image cross-attention layer. """ vision_features = self.layer_norm_vision(vision_features) text_features = self.layer_norm_text(text_features) (delta_v, vision_attn), (delta_t, text_attn) = self.attn( vision_features, text_features, vision_attention_mask=attention_mask_vision, text_attention_mask=attention_mask_text, ) vision_features = vision_features + self.drop_path(self.vision_param * delta_v) text_features = text_features + self.drop_path(self.text_param * delta_t) return (vision_features, vision_attn), (text_features, text_attn) @auto_docstring class MMGroundingDinoPreTrainedModel(PreTrainedModel): config: MMGroundingDinoConfig base_model_prefix = "model" main_input_name = "pixel_values" input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): std = self.config.init_std if isinstance(module, MMGroundingDinoLearnedPositionEmbedding): init.uniform_(module.row_embeddings.weight) init.uniform_(module.column_embeddings.weight) elif isinstance(module, MMGroundingDinoMultiscaleDeformableAttention): init.constant_(module.sampling_offsets.weight, 0.0) default_dtype = torch.get_default_dtype() thetas = torch.arange(module.n_heads, dtype=torch.int64).to(default_dtype) * ( 2.0 * math.pi / module.n_heads ) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = ( (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) .view(module.n_heads, 1, 1, 2) .repeat(1, module.n_levels, module.n_points, 1) ) for i in range(module.n_points): grid_init[:, :, i, :] *= i + 1 init.copy_(module.sampling_offsets.bias, grid_init.view(-1)) init.constant_(module.attention_weights.weight, 0.0) init.constant_(module.attention_weights.bias, 0.0) init.xavier_uniform_(module.value_proj.weight) init.constant_(module.value_proj.bias, 0.0) init.xavier_uniform_(module.output_proj.weight) init.constant_(module.output_proj.bias, 0.0) elif isinstance(module, MMGroundingDinoBiMultiHeadAttention): init.xavier_uniform_(module.vision_proj.weight) init.zeros_(module.vision_proj.bias) init.xavier_uniform_(module.text_proj.weight) init.zeros_(module.text_proj.bias) init.xavier_uniform_(module.values_vision_proj.weight) init.zeros_(module.values_vision_proj.bias) init.xavier_uniform_(module.values_text_proj.weight) init.zeros_(module.values_text_proj.bias) init.xavier_uniform_(module.out_vision_proj.weight) init.zeros_(module.out_vision_proj.bias) init.xavier_uniform_(module.out_text_proj.weight) init.zeros_(module.out_text_proj.bias) elif isinstance(module, MMGroundingDinoFusionLayer): init.constant_(module.vision_param, 1e-4) init.constant_(module.text_param, 1e-4) elif isinstance(module, (nn.Linear, nn.Conv2d)): init.normal_(module.weight, mean=0.0, std=std) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): init.ones_(module.weight) init.zeros_(module.bias) elif isinstance(module, nn.Embedding): init.normal_(module.weight, mean=0.0, std=std) # Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False): init.zeros_(module.weight[module.padding_idx]) elif 
isinstance(module, MMGroundingDinoMLPPredictionHead): init.constant_(module.layers[-1].weight, 0) init.constant_(module.layers[-1].bias, 0) if hasattr(module, "reference_points") and not self.config.two_stage: init.xavier_uniform_(module.reference_points.weight, gain=1.0) init.constant_(module.reference_points.bias, 0.0) if hasattr(module, "level_embed"): init.normal_(module.level_embed) if isinstance(module, MMGroundingDinoContrastiveEmbedding): init.constant_(module.bias, -math.log((1 - 0.01) / 0.01)) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, MMGroundingDinoDecoder): module.gradient_checkpointing = value class MMGroundingDinoFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias def replace_batch_norm(model): r""" Recursively replace all `torch.nn.BatchNorm2d` with `MMGroundingDinoFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model """ for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = MMGroundingDinoFrozenBatchNorm2d(module.num_features) if module.weight.device != torch.device("meta"): new_module.weight.copy_(module.weight) new_module.bias.copy_(module.bias) new_module.running_mean.copy_(module.running_mean) new_module.running_var.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module) class MMGroundingDinoConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by MMGroundingDinoFrozenBatchNorm2d as defined above. 
""" def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ["timm"]) backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, **config.backbone_kwargs, ) else: backbone = load_backbone(config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = None if config.backbone is not None: backbone_model_type = config.backbone elif config.backbone_config is not None: backbone_model_type = config.backbone_config.model_type else: raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
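The modeling code above already shows the core of the open-vocabulary head: class logits are not produced by a fixed classifier but are scaled dot products between decoder queries and the encoded text tokens (`MMGroundingDinoContrastiveEmbedding`), masked for padding and right-padded to `max_text_len`. Here is a standalone restatement of that forward pass; the function name `contrastive_logits` and the toy shapes are hypothetical, the arithmetic mirrors the class above.

```python
import math

import torch


def contrastive_logits(vision_hidden_state, text_hidden_state, text_token_mask, bias, max_text_len):
    # Scaled dot product between each query and each text token, plus a learned scalar bias.
    res = vision_hidden_state @ text_hidden_state.transpose(-1, -2)
    res = res / math.sqrt(vision_hidden_state.shape[-1])
    res = res + bias
    # Padding text tokens can never be predicted.
    res.masked_fill_(~text_token_mask[:, None, :], float("-inf"))
    # Right-pad the class dimension to a fixed max_text_len so every layer's head shares one shape.
    out = torch.full((*res.shape[:-1], max_text_len), float("-inf"), device=res.device)
    out[..., : res.shape[-1]] = res
    return out


queries = torch.randn(2, 900, 256)  # (batch, num_queries, d_model)
text = torch.randn(2, 6, 256)       # (batch, num_text_tokens, d_model)
mask = torch.tensor([[True] * 6, [True] * 4 + [False] * 2])
logits = contrastive_logits(queries, text, mask, bias=torch.tensor(0.0), max_text_len=256)
print(logits.shape)  # torch.Size([2, 900, 256])
```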
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/timm_backbone/configuration_timm_backbone.py
src/transformers/models/timm_backbone/configuration_timm_backbone.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Configuration for Backbone models""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class TimmBackboneConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`]. It is used to instantiate a timm backbone model according to the specified arguments, defining the model. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: backbone (`str`, *optional*): The timm checkpoint to load. num_channels (`int`, *optional*, defaults to 3): The number of input channels. features_only (`bool`, *optional*, defaults to `True`): Whether to output only the features or also the logits. use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use a pretrained backbone. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). Will default to the last stage if unset. freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`): Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. Example: ```python >>> from transformers import TimmBackboneConfig, TimmBackbone >>> # Initializing a timm backbone >>> configuration = TimmBackboneConfig("resnet50") >>> # Initializing a model from the configuration >>> model = TimmBackbone(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "timm_backbone" def __init__( self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, freeze_batch_norm_2d=False, **kwargs, ): super().__init__(**kwargs) self.backbone = backbone self.num_channels = num_channels self.features_only = features_only self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = True self.out_indices = out_indices if out_indices is not None else [-1] self.freeze_batch_norm_2d = freeze_batch_norm_2d __all__ = ["TimmBackboneConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
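As a complement to the docstring example above, the short sketch below builds a `TimmBackboneConfig` that requests several intermediate stages and frozen batch norm; the backbone name and the `out_indices` values are illustrative assumptions, not defaults of any particular model.

```python
from transformers import TimmBackboneConfig

config = TimmBackboneConfig(
    backbone="resnet18",
    out_indices=[1, 2, 3],        # return three intermediate stages instead of only the last
    freeze_batch_norm_2d=True,    # convert BatchNorm2d/SyncBatchNorm into FrozenBatchNorm2d
    use_pretrained_backbone=False,
)
print(config.out_indices, config.use_timm_backbone)  # [1, 2, 3] True
```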
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/timm_backbone/modeling_timm_backbone.py
src/transformers/models/timm_backbone/modeling_timm_backbone.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm from ...integrations.timm import _maybe_reinit_non_persistent_buffer if is_torch_available(): from torch import Tensor class TimmBackbone(PreTrainedModel, BackboneMixin): """ Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably with the other models in the library keeping the same API. """ main_input_name = "pixel_values" input_modalities = ("image",) supports_gradient_checkpointing = False config: TimmBackboneConfig def __init__(self, config, **kwargs): requires_backends(self, "timm") super().__init__(config) self.config = config if config.backbone is None: raise ValueError("backbone is not set in the config. Please set it to a timm model name.") if hasattr(config, "out_features") and config.out_features is not None: raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.") pretrained = getattr(config, "use_pretrained_backbone", None) if pretrained is None: raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.") # We just take the final layer by default. This matches the default for the transformers models. out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,) in_chans = kwargs.pop("in_chans", config.num_channels) self._backbone = timm.create_model( config.backbone, pretrained=pretrained, # This is currently not possible for transformer architectures. features_only=config.features_only, in_chans=in_chans, out_indices=out_indices, **kwargs, ) # Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct2d` layers of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively if getattr(config, "freeze_batch_norm_2d", False): self.freeze_batch_norm_2d() # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
self._return_layers = { layer["module"]: str(layer["index"]) for layer in self._backbone.feature_info.get_dicts() } self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)} super()._init_backbone(config) self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): requires_backends(cls, ["vision", "timm"]) config = kwargs.pop("config", TimmBackboneConfig()) use_timm = kwargs.pop("use_timm_backbone", True) if not use_timm: raise ValueError("use_timm_backbone must be True for timm backbones") num_channels = kwargs.pop("num_channels", config.num_channels) features_only = kwargs.pop("features_only", config.features_only) use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone) out_indices = kwargs.pop("out_indices", config.out_indices) config = TimmBackboneConfig( backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, ) return super()._from_config(config, **kwargs) def freeze_batch_norm_2d(self): timm.utils.model.freeze_batch_norm_2d(self._backbone) def unfreeze_batch_norm_2d(self): timm.utils.model.unfreeze_batch_norm_2d(self._backbone) @torch.no_grad() def _init_weights(self, module): """We need to at least re-init the non-persistent buffers if the model was initialized on meta device (we assume weights and persistent buffers will be part of checkpoint as we have no way to control timm inits)""" _maybe_reinit_non_persistent_buffer(module) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[BackboneOutput, tuple[Tensor, ...]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("Cannot output attentions for timm backbones at the moment") if output_hidden_states: # We modify the return layers to include all the stages of the backbone self._backbone.return_layers = self._all_layers hidden_states = self._backbone(pixel_values, **kwargs) self._backbone.return_layers = self._return_layers feature_maps = tuple(hidden_states[i] for i in self.out_indices) else: feature_maps = self._backbone(pixel_values, **kwargs) hidden_states = None feature_maps = tuple(feature_maps) hidden_states = tuple(hidden_states) if hidden_states is not None else None if not return_dict: output = (feature_maps,) if output_hidden_states: output = output + (hidden_states,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None) __all__ = ["TimmBackbone"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
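To show the wrapper above end to end, here is a small usage sketch assuming `timm` is available locally; `resnet18`, the random weights, and the dummy input shape are illustrative assumptions rather than anything mandated by the class.

```python
import torch
from transformers import TimmBackbone

backbone = TimmBackbone.from_pretrained(
    "resnet18",
    use_pretrained_backbone=False,  # random weights, so nothing needs to be downloaded
    out_indices=(1, 2, 3),
)

pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
for feature_map in outputs.feature_maps:  # one tensor per requested stage
    print(feature_map.shape)
```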
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/timm_backbone/__init__.py
src/transformers/models/timm_backbone/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_timm_backbone import * from .modeling_timm_backbone import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2/modeling_lfm2.py
src/transformers/models/lfm2/modeling_lfm2.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/lfm2/modular_lfm2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_lfm2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable from typing import Any, Optional, Union import torch import torch.nn.functional as F from torch import nn from ...cache_utils import Cache from ...generation import GenerationMixin from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func from ...masking_utils import create_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...utils.generic import check_model_inputs, maybe_autocast from ...utils.import_utils import is_causal_conv1d_available, is_torchdynamo_compiling from .configuration_lfm2 import Lfm2Config if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_fn, causal_conv1d_update = None, None @use_kernel_forward_from_hub("RMSNorm") class Lfm2RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Lfm2RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class Lfm2RotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: Lfm2Config, device=None): super().__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False) @staticmethod def 
compute_default_rope_parameters( config: Optional[Lfm2Config] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). """ base = config.rope_parameters["rope_theta"] dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with maybe_autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class Lfm2MLP(nn.Module): def __init__(self, config: Lfm2Config): super().__init__() intermediate_size = config.intermediate_size if config.block_auto_adjust_ff_dim: intermediate_size = int(2 * intermediate_size / 3) # custom dim factor multiplier if config.block_ffn_dim_multiplier is not None: intermediate_size = int(config.block_ffn_dim_multiplier * intermediate_size) intermediate_size = config.block_multiple_of * ( (intermediate_size + config.block_multiple_of - 1) // config.block_multiple_of ) self.w1 = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.w3 = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.w2 = nn.Linear(intermediate_size, config.hidden_size, bias=False) def forward(self, x): return self.w2(F.silu(self.w1(x)) * self.w3(x)) class Lfm2HybridConvCache: """ Attention and conv cache for Lfm2. It stores the Key and Value states as a list of tensors, one for each layer. Attention layer cache shape: `[batch_size, num_heads, seq_len, head_dim]`. Conv layer cache shape: `[batch_size, hidden_size, L_cache-1]`. 
""" # Override @property existing in Cache max_batch_size = None is_compileable = False key_cache = None value_cache = None def __init__( self, config: Lfm2Config, max_batch_size: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, str, None] = None, ): self.key_cache = [] self.value_cache = [] self.max_batch_size = max_batch_size self.layer_types = config.layer_types self.first_attention_layer = self.layer_types.index("full_attention") self.conv_L_cache = config.conv_L_cache self._dtype = dtype self.conv_cache: list[torch.Tensor] = [] device = torch.device(device) if device is not None else None for _ in range(config.num_hidden_layers): conv_state = torch.zeros( self.max_batch_size, config.hidden_size, self.conv_L_cache, dtype=self._dtype, device=device, ) self.conv_cache.append(conv_state) self.key_cache.append(torch.tensor([])) self.value_cache.append(torch.tensor([])) def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. Parameters: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`. Return: A tuple containing the updated key and value states. """ # Update the cache if self.key_cache[layer_idx].numel() == 0: self.key_cache[layer_idx] = key_states self.value_cache[layer_idx] = value_states else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) return self.key_cache[layer_idx], self.value_cache[layer_idx] def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" for layer_idx in range(len(self.key_cache)): if self.key_cache[layer_idx].numel(): device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) if self.conv_cache[layer_idx].numel(): device = self.conv_cache[layer_idx].device self.conv_cache[layer_idx] = self.conv_cache[layer_idx].index_select(0, beam_idx.to(device)) def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" # take any layer that contains cache and not empty tensor layer_idx = self.first_attention_layer if self.layer_types[layer_idx] != "full_attention" else layer_idx if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0: return 0 return self.key_cache[layer_idx].shape[-2] def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]: """ Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for the given layer at `layer_idx`. The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns (i.e. sliding_window, chunk_size), for each layer. 
""" full_mask_kv_offset = 0 query_length = cache_position.shape[0] past_seen_tokens = self.get_seq_length() kv_length = query_length + past_seen_tokens return kv_length, full_mask_kv_offset def crop(self, max_length: int): """Crop the cache to the given length""" if max_length < 0: max_length = self.get_seq_length() - abs(max_length) if self.get_seq_length() <= max_length: return for idx in range(len(self.key_cache)): if self.key_cache[idx].numel(): self.key_cache[idx] = self.key_cache[idx][..., :max_length, :] self.value_cache[idx] = self.value_cache[idx][..., :max_length, :] def __len__(self) -> int: return len(self.key_cache) def reset(self): for layer_idx in range(len(self.conv_cache)): # In-place ops prevent breaking the static address self.conv_cache[layer_idx].zero_() def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) @use_kernel_func_from_hub("rotary_pos_emb") def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights @use_kernelized_func(apply_rotary_pos_emb) class Lfm2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Lfm2Config, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.is_causal = True self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.out_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.q_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps) self.k_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps) def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_layernorm(self.q_proj(hidden_states).view(*hidden_shape)).transpose(1, 2) key_states = self.k_layernorm(self.k_proj(hidden_states).view(*hidden_shape)).transpose(1, 2) value_states = self.v_proj(hidden_states).view(*hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = 
attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() output = self.out_proj(attn_output) return output, attn_weights def apply_mask_to_padding_states(hidden_states, attention_mask): """ Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66 """ # NOTE: attention mask is a 2D boolean tensor if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: dtype = hidden_states.dtype hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) return hidden_states kernel_modules = (causal_conv1d_fn, causal_conv1d_update) is_fast_path_available = all(kernel_modules) class Lfm2ShortConv(nn.Module): def __init__( self, config: Lfm2Config, layer_idx: int, ): super().__init__() self.config = config self.layer_idx = layer_idx self.L_cache = config.conv_L_cache self.bias = config.conv_bias self.conv = nn.Conv1d( in_channels=config.hidden_size, out_channels=config.hidden_size, kernel_size=self.L_cache, groups=config.hidden_size, bias=self.bias, padding=self.L_cache - 1, ) self.in_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=self.bias) self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=self.bias) def cuda_kernels_forward( self, x: torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): x = apply_mask_to_padding_states(x, attention_mask) BCx = self.in_proj(x).transpose(-1, -2) B, C, x = BCx.chunk(3, dim=-2) Bx = B * x conv_weights = self.conv.weight.view(self.conv.weight.size(0), self.conv.weight.size(2)) if past_key_values is not None and cache_position[0] > 0: conv_out = causal_conv1d_update( Bx.squeeze(-1), past_key_values.conv_cache[self.layer_idx], conv_weights, self.conv.bias, None, ) conv_out = conv_out.unsqueeze(-1) else: if past_key_values is not None: conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0)) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = causal_conv1d_fn(Bx, conv_weights, self.conv.bias, activation=None) y = C * conv_out y = self.out_proj(y.transpose(-1, -2).contiguous()) return y def slow_forward( self, x: torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): seqlen = x.shape[1] x = apply_mask_to_padding_states(x, attention_mask) BCx = self.in_proj(x).transpose(-1, -2) B, C, x = BCx.chunk(3, dim=-2) Bx = B * x if past_key_values is not None and cache_position[0] > 0: conv_state = past_key_values.conv_cache[self.layer_idx] cache_position = cache_position.clamp(0, self.L_cache - 1) conv_state = conv_state.roll(shifts=-1, dims=-1) conv_state[:, :, cache_position] = Bx.to(device=conv_state.device, dtype=conv_state.dtype) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = torch.sum(conv_state.to(Bx.device) * self.conv.weight[:, 0, :], dim=-1) if self.bias: conv_out += self.conv.bias conv_out = conv_out.unsqueeze(-1) else: if past_key_values is not None: conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0)) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = self.conv(Bx)[..., :seqlen] y = C * conv_out y = y.transpose(-1, -2).contiguous() y = self.out_proj(y) return y def forward( self, hidden_states: 
torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): if is_fast_path_available and "cuda" in hidden_states.device.type and not is_torchdynamo_compiling(): return self.cuda_kernels_forward(hidden_states, past_key_values, cache_position, attention_mask) return self.slow_forward(hidden_states, past_key_values, cache_position, attention_mask) class Lfm2DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Lfm2Config, layer_idx: int): super().__init__() self.is_attention_layer = config.layer_types[layer_idx] == "full_attention" if self.is_attention_layer: self.self_attn = Lfm2Attention(config, layer_idx) else: self.conv = Lfm2ShortConv(config, layer_idx) self.feed_forward = Lfm2MLP(config) self.operator_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) self.ffn_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> torch.Tensor: residual = hidden_states if self.is_attention_layer: hidden_states, _ = self.self_attn( hidden_states=self.operator_norm(hidden_states), position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) else: hidden_states = self.conv( hidden_states=self.operator_norm(hidden_states), past_key_values=past_key_values, cache_position=cache_position, attention_mask=attention_mask, ) hidden_states = hidden_states + residual hidden_states = hidden_states + self.feed_forward(self.ffn_norm(hidden_states)) return hidden_states @auto_docstring class Lfm2PreTrainedModel(PreTrainedModel): config: Lfm2Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Lfm2DecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = False _supports_attention_backend = True _can_record_outputs = { "hidden_states": Lfm2DecoderLayer, "attentions": Lfm2Attention, } @auto_docstring class Lfm2Model(Lfm2PreTrainedModel): def __init__(self, config: Lfm2Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Lfm2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.rotary_emb = Lfm2RotaryEmbedding(config=config) self.gradient_checkpointing = False self.embedding_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) # Initialize weights and apply final processing self.post_init() @check_model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Lfm2HybridConvCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is 
not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: batch_size = inputs_embeds.shape[0] past_key_values = Lfm2HybridConvCache( config=self.config, max_batch_size=batch_size, dtype=self.dtype, device=self.device ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # Skip masking for decoding stage. We check shape here to be compile-friendly linear_attention = attention_mask if inputs_embeds.shape[1] != 1 else None hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids) # decoder layers for decoder_layer in self.layers[: self.config.num_hidden_layers]: layer_mask = causal_mask if decoder_layer.is_attention_layer else linear_attention hidden_states = decoder_layer( hidden_states, attention_mask=layer_mask, position_embeddings=position_embeddings, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) hidden_states = self.embedding_norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) @auto_docstring class Lfm2ForCausalLM(Lfm2PreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} def __init__(self, config): super().__init__(config) self.model = Lfm2Model(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithPast: r""" Example: ```python >>> from transformers import AutoTokenizer, Lfm2ForCausalLM >>> model = Lfm2ForCausalLM.from_pretrained("meta-lfm2/Lfm2-2-7b-hf") >>> tokenizer = AutoTokenizer.from_pretrained("meta-lfm2/Lfm2-2-7b-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values,
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
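The `repeat_kv` helper in the modeling file above is documented as the equivalent of `torch.repeat_interleave(x, dim=1, repeats=n_rep)`. The standalone sketch below restates that helper outside the model and checks the equivalence on a random tensor; the tensor shape is arbitrary and used purely for illustration.

```python
import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_key_value_heads, seq_len, head_dim) -> (batch, num_key_value_heads * n_rep, seq_len, head_dim)
    batch, num_kv_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_kv_heads * n_rep, slen, head_dim)

kv = torch.randn(2, 8, 5, 64)                             # 8 key/value heads
expanded = repeat_kv(kv, n_rep=4)                          # -> 32 query-aligned heads
reference = torch.repeat_interleave(kv, repeats=4, dim=1)
print(expanded.shape, torch.equal(expanded, reference))    # torch.Size([2, 32, 5, 64]) True
```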
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2/modular_lfm2.py
src/transformers/models/lfm2/modular_lfm2.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable from typing import Any, Optional, Union import torch import torch.nn.functional as F from torch import nn from ...masking_utils import create_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ...utils.import_utils import is_causal_conv1d_available, is_torchdynamo_compiling from ..bamba.modeling_bamba import apply_mask_to_padding_states from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding from ..llama.modeling_llama import ( LlamaAttention, LlamaForCausalLM, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm, apply_rotary_pos_emb, eager_attention_forward, ) from .configuration_lfm2 import Lfm2Config if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_fn, causal_conv1d_update = None, None kernel_modules = (causal_conv1d_fn, causal_conv1d_update) is_fast_path_available = all(kernel_modules) logger = logging.get_logger(__name__) class Lfm2RMSNorm(LlamaRMSNorm): pass class Lfm2RotaryEmbedding(Gemma2RotaryEmbedding): pass class Lfm2MLP(nn.Module): def __init__(self, config: Lfm2Config): super().__init__() intermediate_size = config.intermediate_size if config.block_auto_adjust_ff_dim: intermediate_size = int(2 * intermediate_size / 3) # custom dim factor multiplier if config.block_ffn_dim_multiplier is not None: intermediate_size = int(config.block_ffn_dim_multiplier * intermediate_size) intermediate_size = config.block_multiple_of * ( (intermediate_size + config.block_multiple_of - 1) // config.block_multiple_of ) self.w1 = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.w3 = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.w2 = nn.Linear(intermediate_size, config.hidden_size, bias=False) def forward(self, x): return self.w2(F.silu(self.w1(x)) * self.w3(x)) class Lfm2HybridConvCache: """ Attention and conv cache for Lfm2. It stores the Key and Value states as a list of tensors, one for each layer. Attention layer cache shape: `[batch_size, num_heads, seq_len, head_dim]`. Conv layer cache shape: `[batch_size, hidden_size, L_cache-1]`. 
""" # Override @property existing in Cache max_batch_size = None is_compileable = False key_cache = None value_cache = None def __init__( self, config: Lfm2Config, max_batch_size: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, str, None] = None, ): self.key_cache = [] self.value_cache = [] self.max_batch_size = max_batch_size self.layer_types = config.layer_types self.first_attention_layer = self.layer_types.index("full_attention") self.conv_L_cache = config.conv_L_cache self._dtype = dtype self.conv_cache: list[torch.Tensor] = [] device = torch.device(device) if device is not None else None for _ in range(config.num_hidden_layers): conv_state = torch.zeros( self.max_batch_size, config.hidden_size, self.conv_L_cache, dtype=self._dtype, device=device, ) self.conv_cache.append(conv_state) self.key_cache.append(torch.tensor([])) self.value_cache.append(torch.tensor([])) def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. Parameters: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`. Return: A tuple containing the updated key and value states. """ # Update the cache if self.key_cache[layer_idx].numel() == 0: self.key_cache[layer_idx] = key_states self.value_cache[layer_idx] = value_states else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) return self.key_cache[layer_idx], self.value_cache[layer_idx] def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" for layer_idx in range(len(self.key_cache)): if self.key_cache[layer_idx].numel(): device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) if self.conv_cache[layer_idx].numel(): device = self.conv_cache[layer_idx].device self.conv_cache[layer_idx] = self.conv_cache[layer_idx].index_select(0, beam_idx.to(device)) def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" # take any layer that contains cache and not empty tensor layer_idx = self.first_attention_layer if self.layer_types[layer_idx] != "full_attention" else layer_idx if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0: return 0 return self.key_cache[layer_idx].shape[-2] def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]: """ Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for the given layer at `layer_idx`. The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns (i.e. sliding_window, chunk_size), for each layer. 
""" full_mask_kv_offset = 0 query_length = cache_position.shape[0] past_seen_tokens = self.get_seq_length() kv_length = query_length + past_seen_tokens return kv_length, full_mask_kv_offset def crop(self, max_length: int): """Crop the cache to the given length""" if max_length < 0: max_length = self.get_seq_length() - abs(max_length) if self.get_seq_length() <= max_length: return for idx in range(len(self.key_cache)): if self.key_cache[idx].numel(): self.key_cache[idx] = self.key_cache[idx][..., :max_length, :] self.value_cache[idx] = self.value_cache[idx][..., :max_length, :] def __len__(self) -> int: return len(self.key_cache) def reset(self): for layer_idx in range(len(self.conv_cache)): # In-place ops prevent breaking the static address self.conv_cache[layer_idx].zero_() class Lfm2Attention(LlamaAttention): def __init__(self, config: Lfm2Config, layer_idx: int): super().__init__(config, layer_idx) self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.out_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.q_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps) self.k_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps) del self.o_proj del self.attention_dropout def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_layernorm(self.q_proj(hidden_states).view(*hidden_shape)).transpose(1, 2) key_states = self.k_layernorm(self.k_proj(hidden_states).view(*hidden_shape)).transpose(1, 2) value_states = self.v_proj(hidden_states).view(*hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() output = self.out_proj(attn_output) return output, attn_weights class Lfm2ShortConv(nn.Module): def __init__( self, config: Lfm2Config, layer_idx: int, ): super().__init__() self.config = config self.layer_idx = layer_idx self.L_cache = config.conv_L_cache self.bias = config.conv_bias self.conv = nn.Conv1d( in_channels=config.hidden_size, out_channels=config.hidden_size, kernel_size=self.L_cache, groups=config.hidden_size, bias=self.bias, padding=self.L_cache - 1, ) self.in_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=self.bias) self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=self.bias) def 
cuda_kernels_forward( self, x: torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): x = apply_mask_to_padding_states(x, attention_mask) BCx = self.in_proj(x).transpose(-1, -2) B, C, x = BCx.chunk(3, dim=-2) Bx = B * x conv_weights = self.conv.weight.view(self.conv.weight.size(0), self.conv.weight.size(2)) if past_key_values is not None and cache_position[0] > 0: conv_out = causal_conv1d_update( Bx.squeeze(-1), past_key_values.conv_cache[self.layer_idx], conv_weights, self.conv.bias, None, ) conv_out = conv_out.unsqueeze(-1) else: if past_key_values is not None: conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0)) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = causal_conv1d_fn(Bx, conv_weights, self.conv.bias, activation=None) y = C * conv_out y = self.out_proj(y.transpose(-1, -2).contiguous()) return y def slow_forward( self, x: torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): seqlen = x.shape[1] x = apply_mask_to_padding_states(x, attention_mask) BCx = self.in_proj(x).transpose(-1, -2) B, C, x = BCx.chunk(3, dim=-2) Bx = B * x if past_key_values is not None and cache_position[0] > 0: conv_state = past_key_values.conv_cache[self.layer_idx] cache_position = cache_position.clamp(0, self.L_cache - 1) conv_state = conv_state.roll(shifts=-1, dims=-1) conv_state[:, :, cache_position] = Bx.to(device=conv_state.device, dtype=conv_state.dtype) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = torch.sum(conv_state.to(Bx.device) * self.conv.weight[:, 0, :], dim=-1) if self.bias: conv_out += self.conv.bias conv_out = conv_out.unsqueeze(-1) else: if past_key_values is not None: conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0)) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = self.conv(Bx)[..., :seqlen] y = C * conv_out y = y.transpose(-1, -2).contiguous() y = self.out_proj(y) return y def forward( self, hidden_states: torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): if is_fast_path_available and "cuda" in hidden_states.device.type and not is_torchdynamo_compiling(): return self.cuda_kernels_forward(hidden_states, past_key_values, cache_position, attention_mask) return self.slow_forward(hidden_states, past_key_values, cache_position, attention_mask) class Lfm2DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Lfm2Config, layer_idx: int): super().__init__() self.is_attention_layer = config.layer_types[layer_idx] == "full_attention" if self.is_attention_layer: self.self_attn = Lfm2Attention(config, layer_idx) else: self.conv = Lfm2ShortConv(config, layer_idx) self.feed_forward = Lfm2MLP(config) self.operator_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) self.ffn_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> torch.Tensor: residual = hidden_states if self.is_attention_layer: 
hidden_states, _ = self.self_attn( hidden_states=self.operator_norm(hidden_states), position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) else: hidden_states = self.conv( hidden_states=self.operator_norm(hidden_states), past_key_values=past_key_values, cache_position=cache_position, attention_mask=attention_mask, ) hidden_states = hidden_states + residual hidden_states = hidden_states + self.feed_forward(self.ffn_norm(hidden_states)) return hidden_states class Lfm2PreTrainedModel(LlamaPreTrainedModel): _can_compile_fullgraph = False class Lfm2Model(LlamaModel): def __init__(self, config: Lfm2Config): super().__init__(config) self.embedding_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) del self.norm def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Lfm2HybridConvCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: batch_size = inputs_embeds.shape[0] past_key_values = Lfm2HybridConvCache( config=self.config, max_batch_size=batch_size, dtype=self.dtype, device=self.device ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # Skip masking for decoding stage. We check shape here to be compile-friendly linear_attention = attention_mask if inputs_embeds.shape[1] != 1 else None hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids) # decoder layers for decoder_layer in self.layers[: self.config.num_hidden_layers]: layer_mask = causal_mask if decoder_layer.is_attention_layer else linear_attention hidden_states = decoder_layer( hidden_states, attention_mask=layer_mask, position_embeddings=position_embeddings, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) hidden_states = self.embedding_norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) class Lfm2ForCausalLM(LlamaForCausalLM): pass __all__ = ["Lfm2ForCausalLM", "Lfm2Model", "Lfm2PreTrainedModel"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
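`Lfm2MLP` (defined both in this modular file and in the generated modeling file) optionally shrinks and re-rounds the feed-forward width. The sketch below reproduces just that arithmetic as a free function so the effect is easy to inspect; the input sizes are made-up examples, not values from a released checkpoint.

```python
def adjusted_ff_dim(intermediate_size: int, multiple_of: int = 256, ffn_dim_multiplier: float = 1.0) -> int:
    size = int(2 * intermediate_size / 3)                 # SwiGLU-style 2/3 shrink when auto-adjust is enabled
    size = int(ffn_dim_multiplier * size)                 # optional custom multiplier
    return multiple_of * ((size + multiple_of - 1) // multiple_of)  # round up to the nearest multiple

print(adjusted_ff_dim(12288))                           # 8192
print(adjusted_ff_dim(12288, ffn_dim_multiplier=1.3))   # int(1.3 * 8192) = 10649, rounded up to 42 * 256 = 10752
```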
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2/configuration_lfm2.py
src/transformers/models/lfm2/configuration_lfm2.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from ...configuration_utils import PreTrainedConfig from ...modeling_rope_utils import RopeParameters class Lfm2Config(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Lfm2Model`]. It is used to instantiate a LFM2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LFM2-1.2B model. e.g. [LiquidAI/LFM2-1.2B](https://huggingface.co/LiquidAI/LFM2-1.2B) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65536): Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Lfm2Model`] hidden_size (`int`, *optional*, defaults to 2560): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 12288): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. max_position_embeddings (`int`, *optional*, defaults to 128000): The maximum sequence length that this model might ever be used with. Lfm2 1 supports up to 2048 tokens, Lfm2 2 up to 4096, CodeLfm2 up to 16384. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 1): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. 
tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. conv_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in the conv layers. conv_L_cache (`int`, *optional*, defaults to 3): L_cache dim in the conv layers. block_multiple_of (`int`, *optional*, defaults to 256): Multiple for the `intermediate_size`. block_ffn_dim_multiplier (`float`, *optional*, defaults to 1.0): Multiplier for the `intermediate_size`. block_auto_adjust_ff_dim (`bool`, *optional*, defaults to `True`): Whether to adjust the dim of the `intermediate_size`. full_attn_idxs (`Optional`, *optional*): Index of the layers which use attention. layer_types (`Optional`, *optional*): Type of each layers. ```python >>> from transformers import Lfm2Model, Lfm2Config >>> # Initializing a LFM2 model >>> configuration = Lfm2Config() >>> # Initializing a model from the LFM2-1.2B style configuration >>> model = Lfm2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "lfm2" keys_to_ignore_at_inference = ["past_key_values"] default_theta = 1000000.0 def __init__( self, vocab_size: Optional[int] = 65536, hidden_size: Optional[int] = 2560, intermediate_size: Optional[int] = 12288, num_hidden_layers: Optional[int] = 32, num_attention_heads: Optional[int] = 32, num_key_value_heads: Optional[int] = 8, max_position_embeddings: Optional[int] = 128_000, initializer_range: Optional[float] = 0.02, norm_eps: Optional[float] = 0.00001, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 1, eos_token_id: Optional[int] = 2, tie_word_embeddings: Optional[bool] = True, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, conv_bias: Optional[bool] = False, conv_L_cache: Optional[int] = 3, block_multiple_of: Optional[int] = 256, block_ffn_dim_multiplier: Optional[float] = 1.0, block_auto_adjust_ff_dim: Optional[bool] = True, full_attn_idxs: Optional[list[int]] = None, layer_types: Optional[list[str]] = None, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.max_position_embeddings = max_position_embeddings self.use_cache = use_cache self.norm_eps = norm_eps self.initializer_range = initializer_range # attn operator config self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads # custom operator config self.conv_bias = conv_bias self.conv_L_cache = conv_L_cache # MLP config self.intermediate_size = kwargs.get("block_ff_dim", intermediate_size) # to fit original config keys self.block_multiple_of = block_multiple_of self.block_ffn_dim_multiplier = block_ffn_dim_multiplier self.block_auto_adjust_ff_dim = block_auto_adjust_ff_dim self.layer_types = layer_types if self.layer_types is None: full_attn_idxs = full_attn_idxs if full_attn_idxs is not None else list(range(num_hidden_layers)) self.layer_types = ["full_attention" if i in full_attn_idxs else "conv" for i in range(num_hidden_layers)] self.rope_parameters = rope_parameters tie_word_embeddings = kwargs.get("tie_embedding", tie_word_embeddings) # to fit original config keys super().__init__( pad_token_id=pad_token_id, 
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["Lfm2Config"]
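The way `full_attn_idxs` is folded into `layer_types` inside the `__init__` above is easiest to see with a concrete instantiation. A minimal sketch, assuming a transformers installation recent enough to export `Lfm2Config`; the layer count and indices are made up for illustration:

```python
# Hypothetical usage sketch for Lfm2Config (assumes transformers ships the LFM2 model).
from transformers import Lfm2Config

# A toy 6-layer model where only layers 2 and 5 use full attention;
# every other layer falls back to the short-convolution ("conv") operator.
config = Lfm2Config(num_hidden_layers=6, full_attn_idxs=[2, 5])

# `layer_types` is derived in __init__ when it is not passed explicitly.
print(config.layer_types)
# ['conv', 'conv', 'full_attention', 'conv', 'conv', 'full_attention']

# The original checkpoints use `block_ff_dim` instead of `intermediate_size`;
# the constructor accepts either spelling (see the kwargs.get call above).
legacy = Lfm2Config(num_hidden_layers=6, block_ff_dim=8192)
print(legacy.intermediate_size)  # 8192
```

Leaving both `layer_types` and `full_attn_idxs` unset marks every layer as `"full_attention"`, which matches the fallback in the constructor.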
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2/__init__.py
src/transformers/models/lfm2/__init__.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_lfm2 import * from .modeling_lfm2 import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
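The `__init__.py` above only wires up lazy imports: at type-checking time the star imports are visible to static tools, while at runtime the entry in `sys.modules` is replaced by a `_LazyModule` that defers the real imports until an attribute is first accessed. A small sketch of what that looks like from the outside, assuming a transformers build that includes the LFM2 model:

```python
# Sketch only: demonstrates the lazy-module behaviour, not part of the library code.
import sys

import transformers.models.lfm2 as lfm2

# The entry in sys.modules is the _LazyModule proxy created in the __init__.py above.
print(type(sys.modules["transformers.models.lfm2"]).__name__)  # "_LazyModule"

# First attribute access triggers the actual import of configuration_lfm2.
print(lfm2.Lfm2Config.model_type)  # "lfm2"
```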
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OPT checkpoint.""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_checkpoint(checkpoint_path): """Checkpoint path should end in model.pt""" sd = torch.load(checkpoint_path, map_location="cpu", weights_only=True) if "model" in sd: sd = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"] # pop unnecessary weights keys_to_delete = [ "decoder.version", "decoder.output_projection.weight", ] for key in keys_to_delete: if key in sd: sd.pop(key) keys_to_rename = { "decoder.project_in_dim.weight": "decoder.project_in.weight", "decoder.project_out_dim.weight": "decoder.project_out.weight", "decoder.layer_norm.weight": "decoder.final_layer_norm.weight", "decoder.layer_norm.bias": "decoder.final_layer_norm.bias", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: sd[new_key] = sd.pop(old_key) keys = list(sd.keys()) for key in keys: if ".qkv_proj." in key: value = sd[key] # We split QKV in separate Q,K,V q_name = key.replace(".qkv_proj.", ".q_proj.") k_name = key.replace(".qkv_proj.", ".k_proj.") v_name = key.replace(".qkv_proj.", ".v_proj.") depth = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 k, v, q = torch.split(value, depth // 3, dim=0) sd[q_name] = q sd[k_name] = k sd[v_name] = v del sd[key] return sd @torch.no_grad() def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None): """ Copy/paste/tweak model's weights to our BERT structure. """ state_dict = load_checkpoint(checkpoint_path) if config is not None: config = OPTConfig.from_pretrained(config) else: config = OPTConfig() model = OPTModel(config).half().eval() model.load_state_dict(state_dict) # Check results Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") args = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
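One step of `load_checkpoint()` above that is easy to get wrong is the fused-QKV split: the metaseq layer stores a single `qkv_proj` matrix whose row blocks are laid out K, V, Q despite the name. A toy reproduction with random weights (no metaseq checkpoint needed), sized down for readability:

```python
# Toy sketch of the qkv_proj split performed by load_checkpoint(); the split order
# (K, V, Q) follows the comment and the linked metaseq source above.
import torch

embed_dim = 12                                   # real OPT models are much larger
qkv_weight = torch.randn(3 * embed_dim, embed_dim)

assert qkv_weight.shape[0] % 3 == 0
k, v, q = torch.split(qkv_weight, qkv_weight.shape[0] // 3, dim=0)

converted = {
    "decoder.layers.0.self_attn.k_proj.weight": k,
    "decoder.layers.0.self_attn.v_proj.weight": v,
    "decoder.layers.0.self_attn.q_proj.weight": q,
}
print({name: tuple(t.shape) for name, t in converted.items()})
```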
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/opt/configuration_opt.py
src/transformers/models/opt/configuration_opt.py
# coding=utf-8 # Copyright 2022 The Metaseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """OPT model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class OPTConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate a OPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OPT [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50272): Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OPTModel`] hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of decoder layers. ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). do_layer_norm_before (`bool`, *optional*, defaults to `True`): Whether to perform layer normalization before the attention block. word_embed_proj_dim (`int`, *optional*): `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to `hidden_size`. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
enable_bias (`bool`, *optional*, defaults to `True`): Whether or not the linear layers in the attention blocks should use the bias term. layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether or not the layer norms should have learnable parameters. Example: ```python >>> from transformers import OPTConfig, OPTModel >>> # Initializing an OPT facebook/opt-large style configuration >>> configuration = OPTConfig() >>> # Initializing a model (with random weights) from the facebook/opt-large style configuration >>> model = OPTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "opt" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=50272, hidden_size=768, num_hidden_layers=12, ffn_dim=3072, max_position_embeddings=2048, do_layer_norm_before=True, _remove_final_layer_norm=False, word_embed_proj_dim=None, dropout=0.1, attention_dropout=0.0, num_attention_heads=12, activation_function="relu", layerdrop=0.0, init_std=0.02, use_cache=True, pad_token_id=1, bos_token_id=2, eos_token_id=2, enable_bias=True, layer_norm_elementwise_affine=True, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.num_attention_heads = num_attention_heads self.word_embed_proj_dim = word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size self.ffn_dim = ffn_dim self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.dropout = dropout self.attention_dropout = attention_dropout self.activation_function = activation_function self.init_std = init_std self.layerdrop = layerdrop self.use_cache = use_cache self.do_layer_norm_before = do_layer_norm_before # We keep these variables at `True` for backward compatibility. self.enable_bias = enable_bias self.layer_norm_elementwise_affine = layer_norm_elementwise_affine # Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 self._remove_final_layer_norm = _remove_final_layer_norm __all__ = ["OPTConfig"]
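Since `opt-350m` is the checkpoint that down-projects its word embeddings, it is worth seeing how `word_embed_proj_dim` defaults. A minimal sketch, configuration only (nothing is downloaded); the 1024/512 pair is an opt-350m-style split chosen for illustration:

```python
# Sketch: how word_embed_proj_dim defaults in OPTConfig.
from transformers import OPTConfig

base = OPTConfig()  # word_embed_proj_dim falls back to hidden_size
print(base.word_embed_proj_dim == base.hidden_size)                  # True

opt_350m_like = OPTConfig(hidden_size=1024, word_embed_proj_dim=512)
print(opt_350m_like.hidden_size, opt_350m_like.word_embed_proj_dim)  # 1024 512
```

When the two sizes differ, `OPTDecoder` (see the `modeling_opt.py` dump below) inserts `project_in`/`project_out` linear layers around the decoder stack.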
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/opt/modeling_opt.py
src/transformers/models/opt/modeling_opt.py
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OPT model.""" from collections.abc import Callable from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from .configuration_opt import OPTConfig if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) class OPTLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward( self, attention_mask: torch.LongTensor, past_key_values_length: int = 0, position_ids: Optional[torch.LongTensor] = None, ): """`input_ids_shape` is expected to be [bsz x seqlen].""" if position_ids is None: position_ids = torch.cumsum(attention_mask, dim=1) position_ids = (position_ids * attention_mask - 1).long() # cut positions if `past_key_values_length` is > 0 position_ids = position_ids[:, past_key_values_length:] return super().forward(position_ids + self.offset) # Copied from transformers.models.siglip.modeling_siglip.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class OPTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: OPTConfig, layer_idx: Optional[int] = None, **kwargs, ): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.dropout = config.attention_dropout self.enable_bias = config.enable_bias self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.head_dim = self.embed_dim // self.num_heads self.is_causal = True if (self.head_dim * self.num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {self.num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) def forward( self, hidden_states: torch.Tensor, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, _ = hidden_states.size() # Scaling is susceptible to floating point arithmetics' inprecisions # which can lead to different results (this is dependent from model # to model, e.g. whisper is one such case). We therefore keep the # original order of scaling to follow the original implementation # and enforce no scaling (1.0) in the attention call below. 
query_states = self.q_proj(hidden_states) * self.scaling query_states = query_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation key_states, value_states = past_key_values.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=1.0, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights class OPTDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: OPTConfig, layer_idx: Optional[int] = None): super().__init__() self.embed_dim = config.hidden_size self.self_attn = OPTAttention(config=config, layer_idx=layer_idx) self.do_layer_norm_before = config.do_layer_norm_before self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.self_attn_layer_norm = nn.LayerNorm( self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine ) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=config.enable_bias) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=config.enable_bias) self.final_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.. 
""" residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, position_ids=position_ids, attention_mask=attention_mask, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected hidden_states_shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = (residual + hidden_states).view(hidden_states_shape) # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs @auto_docstring class OPTPreTrainedModel(PreTrainedModel): config: OPTConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["OPTDecoderLayer"] _supports_attention_backend = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True class OPTDecoder(OPTPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`OPTDecoderLayer`] Args: config: OPTConfig """ def __init__(self, config: OPTConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.word_embed_proj_dim, self.padding_idx) self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size) if config.word_embed_proj_dim != config.hidden_size: self.project_out = nn.Linear(config.hidden_size, config.word_embed_proj_dim, bias=False) else: self.project_out = None if config.word_embed_proj_dim != config.hidden_size: self.project_in = nn.Linear(config.word_embed_proj_dim, config.hidden_size, bias=False) else: self.project_in = None # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = nn.LayerNorm( config.hidden_size, elementwise_affine=config.layer_norm_elementwise_affine ) else: self.final_layer_norm = None self.layers = nn.ModuleList([OPTDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). 
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask @can_return_tuple def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. for padding use -1. [What are position IDs?](../glossary#position-ids) cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if input_ids is not None: input_ids = input_ids.view(-1, input_ids.shape[-1]) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if attention_mask is None: seq_length = past_seen_tokens + inputs_embeds.shape[1] attention_mask = torch.ones(inputs_embeds.shape[0], seq_length, device=inputs_embeds.device) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # embed positions if position_ids is None: # position_ids = cache_position.unsqueeze(0) position_ids = torch.cumsum(attention_mask, dim=1) position_ids = (position_ids * attention_mask - 1).long() # cut positions if `past_seen_tokens` is > 0 position_ids = position_ids[:, past_seen_tokens:] pos_embeds = self.embed_positions(attention_mask, past_seen_tokens, position_ids=position_ids) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds.to(inputs_embeds.device) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: 
hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @auto_docstring class OPTModel(OPTPreTrainedModel): def __init__(self, config: OPTConfig): super().__init__(config) self.decoder = OPTDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) return BaseModelOutputWithPast( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, ) class OPTForCausalLM(OPTPreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.decoder.embed_tokens.weight"} def __init__(self, config): super().__init__(config) self.model = OPTModel(config) # the lm_head weight is automatically tied to the embed tokens weight self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: 
Unpack[TransformersKwargs], ) -> Union[tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
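Before moving on, the position-id trick in `OPTLearnedPositionalEmbedding.forward()` (from the truncated `modeling_opt.py` dump above) is easiest to check with numbers. A self-contained sketch using a toy padding mask, no model weights involved:

```python
# Sketch of how OPT derives position ids from the attention mask (cumsum trick).
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],    # left-padded sequence
                               [1, 1, 1, 1, 1]])   # unpadded sequence

position_ids = torch.cumsum(attention_mask, dim=1)
position_ids = (position_ids * attention_mask - 1).long()
print(position_ids)
# tensor([[-1, -1,  0,  1,  2],
#         [ 0,  1,  2,  3,  4]])

# The lookup itself happens at position_ids + offset, where offset = 2 accounts for
# the two extra rows OPT reserves at the front of the embedding table.
offset = 2
print(position_ids + offset)
```

Padded positions end up at index `-1 + offset = 1`, so they still hit a valid embedding row; their outputs are masked out in attention anyway.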
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/opt/__init__.py
src/transformers/models/opt/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_opt import * from .modeling_opt import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py
src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OpenAI GPT checkpoint.""" import argparse import os import torch from transformers import GPT2Config, GPT2Model from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): """Load tf checkpoints in a pytorch model""" try: import re import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(gpt2_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for name, array in zip(names, arrays): name = name[6:] # skip "model/" name = name.split("/") pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+\d+", m_name): scope_names = re.split(r"(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "w" or scope_names[0] == "g": pointer = getattr(pointer, "weight") elif scope_names[0] == "b": pointer = getattr(pointer, "bias") elif scope_names[0] == "wpe" or scope_names[0] == "wte": pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, "weight") else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except ValueError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path): # Construct model if gpt2_config_file == "": config = GPT2Config() else: config = GPT2Config.from_json_file(gpt2_config_file) model = GPT2Model(config) # Load weights from numpy load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path) # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME print(f"Save PyTorch model to {pytorch_weights_dump_path}") torch.save(model.state_dict(), pytorch_weights_dump_path) print(f"Save configuration file to {pytorch_config_dump_path}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow 
checkpoint path." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--gpt2_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) args = parser.parse_args() convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
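A detail of `load_tf_weights_in_gpt2()` above worth isolating is how TF variable names such as `model/h3/attn/c_attn/w` are turned into attribute walks over the PyTorch module. The scope-splitting part can be exercised on its own, without any TensorFlow checkpoint; the helper name `scope_names` below is just for illustration (in the converter the same logic is inlined in the loop over `name.split("/")`):

```python
# Sketch of the scope-name parsing used by load_tf_weights_in_gpt2().
import re

def scope_names(m_name: str) -> list[str]:
    # "h3" -> ['h', '3', '']: attribute "h", then index 3 (the trailing '' is unused).
    if re.fullmatch(r"[A-Za-z]+\d+", m_name):
        return re.split(r"(\d+)", m_name)
    return [m_name]

print(scope_names("h3"))     # ['h', '3', '']
print(scope_names("wte"))    # ['wte']  -> token embedding, mapped to its .weight
print(scope_names("ln_f"))   # ['ln_f'] -> final layer norm, accessed by name
```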
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gpt2/__init__.py
src/transformers/models/gpt2/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_gpt2 import * from .modeling_gpt2 import * from .tokenization_gpt2 import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gpt2/configuration_gpt2.py
src/transformers/models/gpt2/configuration_gpt2.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """OpenAI GPT-2 configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class GPT2Config(PreTrainedConfig): """ This is the configuration class to store the configuration of a [`GPT2Model`]. It is used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT-2 [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50257): Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPT2Model`]. n_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_embd (`int`, *optional*, defaults to 768): Dimensionality of the embeddings and hidden states. n_layer (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. n_inner (`int`, *optional*): Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd activation_function (`str`, *optional*, defaults to `"gelu_new"`): Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. summary_type (`string`, *optional*, defaults to `"cls_index"`): Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`]. Has to be one of the following options: - `"last"`: Take the last token hidden state (like XLNet). - `"first"`: Take the first token hidden state (like BERT). - `"mean"`: Take the mean of all tokens hidden states. - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - `"attn"`: Not implemented now, use multi-head attention. 
summary_use_proj (`bool`, *optional*, defaults to `True`): Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`]. Whether or not to add a projection after the vector extraction. summary_activation (`str`, *optional*): Argument used when doing sequence summary. Used in for the multiple choice head in [`GPT2DoubleHeadsModel`]. Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation. summary_proj_to_labels (`bool`, *optional*, defaults to `True`): Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`]. Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes. summary_first_dropout (`float`, *optional*, defaults to 0.1): Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`]. The dropout ratio to be used after the projection and activation. scale_attn_weights (`bool`, *optional*, defaults to `True`): Scale attention weights by dividing by sqrt(hidden_size).. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). bos_token_id (`int`, *optional*, defaults to 50256): Id of the beginning of sentence token in the vocabulary. eos_token_id (`int`, *optional*, defaults to 50256): Id of the end of sentence token in the vocabulary. scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`): Whether to additionally scale attention weights by `1 / layer_idx + 1`. reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`): Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention dot-product/softmax to float() when training with mixed precision. Example: ```python >>> from transformers import GPT2Config, GPT2Model >>> # Initializing a GPT2 configuration >>> configuration = GPT2Config() >>> # Initializing a model (with random weights) from the configuration >>> model = GPT2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "gpt2" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_new", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels self.scale_attn_weights = scale_attn_weights self.use_cache = use_cache 
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx self.reorder_and_upcast_attn = reorder_and_upcast_attn self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) __all__ = ["GPT2Config"]
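GPT2Config keeps its historical field names (`n_embd`, `n_layer`, `n_head`) and exposes the library-wide ones through the `attribute_map` declared above. A quick sketch of what that aliasing means in practice (configuration only, nothing downloaded):

```python
# Sketch: attribute_map aliasing on GPT2Config.
from transformers import GPT2Config

config = GPT2Config(n_embd=768, n_layer=12, n_head=12)

# The generic names resolve to the GPT-2 specific attributes.
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 768 12 12

# Writing through an alias updates the underlying attribute as well.
config.hidden_size = 1024
print(config.n_embd)  # 1024
```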
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gpt2/tokenization_gpt2.py
src/transformers/models/gpt2/tokenization_gpt2.py
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" from typing import Optional, Union from tokenizers import Tokenizer, decoders, pre_tokenizers from tokenizers.models import BPE from ...tokenization_utils_tokenizers import AddedToken, TokenizersBackend from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } class GPT2Tokenizer(TokenizersBackend): """ Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import GPT2Tokenizer >>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") >>> tokenizer("Hello world")["input_ids"] [15496, 995] >>> tokenizer(" Hello world")["input_ids"] [18435, 995] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. pad_token (`str`, *optional*): The token used for padding, for example when batching sequences of different lengths. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (GPT2 tokenizer detect beginning of words by the preceding space). add_bos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an initial beginning of sentence token to the input. This allows to treat the leading word just as any other word. vocab (`str` or `dict[str, int]`, *optional*): Custom vocabulary dictionary. If not provided, vocabulary is loaded from `vocab_file`. merges (`str` or `list[str]`, *optional*): Custom merges list. 
If not provided, merges are loaded from `merges_file`. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] model = BPE def __init__( self, vocab: Optional[Union[str, dict[str, int]]] = None, merges: Optional[Union[str, list[str]]] = None, errors: str = "replace", unk_token: Union[AddedToken, str] = "<|endoftext|>", bos_token: Union[AddedToken, str] = "<|endoftext|>", eos_token: Union[AddedToken, str] = "<|endoftext|>", pad_token: Optional[Union[AddedToken, str]] = None, add_prefix_space=False, **kwargs, ): self.add_prefix_space = add_prefix_space self._vocab = vocab if vocab is not None else {} self._merges = merges or [] self._tokenizer = Tokenizer( BPE( vocab=self._vocab, merges=self._merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) self._tokenizer.decoder = decoders.ByteLevel() super().__init__( errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, **kwargs, ) __all__ = ["GPT2Tokenizer"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
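The tokenizer record above documents the leading-space behaviour of byte-level BPE and the `add_prefix_space` option. A minimal usage sketch of that behaviour, assuming the `openai-community/gpt2` checkpoint is reachable; the expected ids for the first two calls are the ones quoted in the docstring, and the `add_prefix_space=True` result is an assumption based on the documented semantics, not an output taken from the source:

```python
from transformers import GPT2Tokenizer

# Default: no prefix space, so a sentence-initial word gets a different id
# than the same word preceded by a space.
tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
print(tokenizer("Hello world")["input_ids"])   # [15496, 995] (docstring example)
print(tokenizer(" Hello world")["input_ids"])  # [18435, 995] (docstring example)

# With add_prefix_space=True the leading word is treated like any other word,
# so this call is expected to produce the " Hello world" ids instead.
tokenizer_prefixed = GPT2Tokenizer.from_pretrained("openai-community/gpt2", add_prefix_space=True)
print(tokenizer_prefixed("Hello world")["input_ids"])
```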
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gpt2/modeling_gpt2.py
src/transformers/models/gpt2/modeling_gpt2.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OpenAI GPT-2 model.""" import math from collections.abc import Callable from dataclasses import dataclass from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ... import initialization as init from ...activations import ACT2FN, get_activation from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import Conv1D from ...utils import ( ModelOutput, auto_docstring, logging, ) from ...utils.generic import maybe_autocast from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) def eager_attention_forward(module, query, key, value, attention_mask, **kwargs): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if module.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device ) # Layer-wise attention scaling if module.scale_attn_by_inverse_layer_idx: attn_weights = attn_weights / float(module.layer_idx + 1) if not module.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = module.bias[:, :, key_length - query_length : key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value) if attention_mask is not None: # Apply the attention mask causal_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise attn_weights = attn_weights.type(value.dtype) attn_weights = module.attn_dropout(attn_weights) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights class GPT2Attention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.config = config max_positions = config.max_position_embeddings self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( 1, 1, max_positions, max_positions ), persistent=False, ) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.split_size = self.embed_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention # Layer-wise attention scaling, reordering, and upcasting self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx self.layer_idx = layer_idx self.reorder_and_upcast_attn = config.reorder_and_upcast_attn if self.is_cross_attention: self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) self.q_attn = Conv1D(self.embed_dim, self.embed_dim) else: self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) self.c_proj = Conv1D(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.is_causal = not is_cross_attention def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None): # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) bsz, num_heads, q_seq_len, dk = query.size() _, _, k_seq_len, _ = key.size() # Preallocate attn_weights for `baddbmm` attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device) # Compute Scale Factor scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= float(value.size(-1)) ** 0.5 if self.scale_attn_by_inverse_layer_idx: scale_factor /= float(self.layer_idx + 1) # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) with maybe_autocast(query.device.type, enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected 
scalar type float but found double`. # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise if attn_weights.dtype != torch.float32: raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32") attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]: is_cross_attention = encoder_hidden_states is not None if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_layer from cache curr_past_key_values = past_key_values.cross_attention_cache else: curr_past_key_values = past_key_values.self_attention_cache else: curr_past_key_values = past_key_values if is_cross_attention: if not hasattr(self, "q_attn"): raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
) query_states = self.q_attn(hidden_states) attention_mask = encoder_attention_mask # Try to get key/value states from cache if possible if past_key_values is not None and is_updated: key_states = curr_past_key_values.layers[self.layer_idx].keys value_states = curr_past_key_values.layers[self.layer_idx].values else: key_states, value_states = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) shape_kv = (*key_states.shape[:-1], -1, self.head_dim) key_states = key_states.view(shape_kv).transpose(1, 2) value_states = value_states.view(shape_kv).transpose(1, 2) else: query_states, key_states, value_states = self.c_attn(hidden_states).split(self.split_size, dim=2) shape_kv = (*key_states.shape[:-1], -1, self.head_dim) key_states = key_states.view(shape_kv).transpose(1, 2) value_states = value_states.view(shape_kv).transpose(1, 2) shape_q = (*query_states.shape[:-1], -1, self.head_dim) query_states = query_states.view(shape_q).transpose(1, 2) if (past_key_values is not None and not is_cross_attention) or ( past_key_values is not None and is_cross_attention and not is_updated ): # save all key/value_layer to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_values.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True using_eager = self.config._attn_implementation == "eager" attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] if using_eager and self.reorder_and_upcast_attn: attn_output, attn_weights = self._upcast_and_reordered_attn( query_states, key_states, value_states, attention_mask ) else: attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=self.attn_dropout.p if self.training else 0.0, **kwargs, ) attn_output = attn_output.reshape(*attn_output.shape[:-2], -1).contiguous() attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) return attn_output, attn_weights class GPT2MLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPT2Block(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPT2Attention(config=config, layer_idx=layer_idx) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: self.crossattention = GPT2Attention(config=config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = nn.LayerNorm(hidden_size, 
eps=config.layer_norm_epsilon) self.mlp = GPT2MLP(inner_dim, config) def forward( self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, **kwargs, ) -> Union[tuple[torch.Tensor], Optional[tuple[torch.Tensor, tuple[torch.FloatTensor, ...]]]]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output, self_attn_weights = self.attn( hidden_states, past_key_values=past_key_values, cache_position=cache_position, attention_mask=attention_mask, use_cache=use_cache, output_attentions=output_attentions, **kwargs, ) # residual connection hidden_states = attn_output + residual if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_output, cross_attn_weights = self.crossattention( hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) # residual connection hidden_states = residual + cross_attn_output residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) # residual connection hidden_states = residual + feed_forward_hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if encoder_hidden_states is not None: outputs += (cross_attn_weights,) return outputs # Copied from transformers.models.xlm.modeling_xlm.XLMSequenceSummary with XLM->GPT2 class GPT2SequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states. Args: config ([`GPT2Config`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. 
""" def __init__(self, config: GPT2Config): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = nn.Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity() self.first_dropout = nn.Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = nn.Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: `torch.FloatTensor`: The summary of the sequence hidden states. 
""" if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like( hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long, ) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output @auto_docstring class GPT2PreTrainedModel(PreTrainedModel): config: GPT2Config base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["GPT2Block"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _supports_attention_backend = True _can_compile_fullgraph = True @torch.no_grad() def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, Conv1D)): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, nn.Embedding): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) # Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False): init.zeros_(module.weight[module.padding_idx]) elif isinstance(module, nn.LayerNorm): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, GPT2Attention): max_positions = module.config.max_position_embeddings init.copy_( module.bias, torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( 1, 1, max_positions, max_positions ), ) # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. # > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py if isinstance(module, PreTrainedModel): for name, p in module.named_parameters(): if name == "c_proj.weight": # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block init.normal_(p, mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer)) @dataclass @auto_docstring( custom_intro=""" Base class for outputs of models predicting if two sentences are consecutive or not. """ ) class GPT2DoubleHeadsModelOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided): Multiple choice classification loss. 
logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ loss: Optional[torch.FloatTensor] = None mc_loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None mc_logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @auto_docstring class GPT2Model(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.gradient_checkpointing = False self._attn_implementation = config._attn_implementation # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # based on pattern from src/transformers/models/whisper/modeling_whisper.py::WhisperDecoder if use_cache: if past_key_values is None: past_key_values = DynamicCache(config=self.config) if self.config.add_cross_attention and not isinstance(past_key_values, EncoderDecoderCache): past_key_values = EncoderDecoderCache(past_key_values, DynamicCache(config=self.config)) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device) # Attention mask. 
# ._update_causal_mask() and ._prepare_4d_causal_attention_mask_with_cache_position() copied from LlamaModel if attention_mask is not None and attention_mask.ndim < 4: attention_mask = attention_mask.view(batch_size, -1) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] _use_sdpa = self._attn_implementation == "sdpa" and output_attentions is False if self.config.add_cross_attention and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) if _use_sdpa: encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( mask=encoder_attention_mask, dtype=inputs_embeds.dtype, tgt_len=input_shape[-1] ) elif self._attn_implementation != "flash_attention_2": encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_attention_mask = None if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
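The `eager_attention_forward` path in the record above scales the raw attention scores by the square root of the value head dimension (optionally also by `layer_idx + 1`) and masks future positions with a lower-triangular buffer before the softmax. A self-contained toy sketch of that computation with made-up shapes; it illustrates the arithmetic, not the module's exact API:

```python
import torch

# Toy shapes: batch=1, heads=2, sequence length=4, head_dim=8.
q = torch.randn(1, 2, 4, 8)
k = torch.randn(1, 2, 4, 8)
v = torch.randn(1, 2, 4, 8)

# Scaled dot-product scores, mirroring the division by value.size(-1) ** 0.5 above
# (layer-wise scaling by layer_idx + 1 is skipped, i.e. layer_idx = 0 is assumed).
scores = torch.matmul(q, k.transpose(-1, -2)) / (v.size(-1) ** 0.5)

# Lower-triangular causal mask, the toy equivalent of slicing the registered `bias` buffer.
causal = torch.tril(torch.ones(4, 4, dtype=torch.bool))
scores = scores.masked_fill(~causal, torch.finfo(scores.dtype).min)

weights = torch.softmax(scores, dim=-1)
attn_output = torch.matmul(weights, v).transpose(1, 2)
print(attn_output.shape)  # torch.Size([1, 4, 2, 8]) -- (batch, seq, heads, head_dim)
```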
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py
src/transformers/models/unispeech_sat/configuration_unispeech_sat.py
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """UniSpeechSat model configuration""" import functools import operator from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class UniSpeechSatConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`UniSpeechSatModel`]. It is used to instantiate an UniSpeechSat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeechSat [microsoft/unispeech-sat-base-100h-libri-ft](https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the UniSpeechSat model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`UniSpeechSatModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`UniSpeechSatModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the feature encoder. feat_quantizer_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the output of the feature encoder that's used by the quantizer. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`UniSpeechSatForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. 
See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_extract_activation (`str, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. do_stable_layer_norm (`bool`, *optional*, defaults to `False`): Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is False` corresponds to applying layer norm after the attention layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://huggingface.co/papers/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. 
mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks'' mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks'' num_codevectors_per_group (`int`, *optional*, defaults to 320): Number of entries in each quantization codebook (group). num_codevector_groups (`int`, *optional*, defaults to 2): Number of codevector groups for product codevector quantization. contrastive_logits_temperature (`float`, *optional*, defaults to 0.1): The temperature *kappa* in the contrastive loss. num_negatives (`int`, *optional*, defaults to 100): Number of negative samples for the contrastive loss. codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the quantized feature vectors. proj_codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the final projection of both the quantized and the transformer features. diversity_loss_weight (`int`, *optional*, defaults to 0.1): The weight of the codebook diversity loss component. ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`UniSpeechSatForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`UniSpeechSatForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`UniSpeechSatForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. tdnn_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`): A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers. tdnn_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*. 
tdnn_dilation (`tuple[int]` or `list[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. num_clusters (`int`, *optional*, defaults to 504): Number of clusters for weak labeling. Only relevant when using an instance of [`UniSpeechSatForPreTraining`]. Example: ```python >>> from transformers import UniSpeechSatModel, UniSpeechSatConfig >>> # Initializing a UniSpeechSat microsoft/unispeech-sat-base-100h-libri-ft style configuration >>> configuration = UniSpeechSatConfig() >>> # Initializing a model from the microsoft/unispeech-sat-base-100h-libri-ft style configuration >>> model = UniSpeechSatModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "unispeech-sat" def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = 
initializer_range self.vocab_size = vocab_size self.num_clusters = num_clusters self.do_stable_layer_norm = do_stable_layer_norm self.use_weighted_layer_sum = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # parameters for pretraining with codevector quantized representations self.num_codevectors_per_group = num_codevectors_per_group self.num_codevector_groups = num_codevector_groups self.contrastive_logits_temperature = contrastive_logits_temperature self.feat_quantizer_dropout = feat_quantizer_dropout self.num_negatives = num_negatives self.codevector_dim = codevector_dim self.proj_codevector_dim = proj_codevector_dim self.diversity_loss_weight = diversity_loss_weight # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. self.classifier_proj_size = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) __all__ = ["UniSpeechSatConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
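The configuration above validates that `conv_dim`, `conv_stride` and `conv_kernel` have equal length and exposes `inputs_to_logits_ratio` as the product of the convolutional strides. A short usage sketch with the default values:

```python
from transformers import UniSpeechSatConfig

# Defaults mirror microsoft/unispeech-sat-base-100h-libri-ft.
config = UniSpeechSatConfig()

# The feature encoder downsamples the waveform by the product of the conv strides:
# 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 raw samples per encoder frame.
print(config.conv_stride)             # [5, 2, 2, 2, 2, 2, 2]
print(config.inputs_to_logits_ratio)  # 320

# Mismatched conv_* lists are rejected at construction time.
try:
    UniSpeechSatConfig(conv_dim=(512, 512), conv_stride=(5, 2, 2))
except ValueError as err:
    print(err)
```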
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/unispeech_sat/convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/unispeech_sat/convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert UniSpeechSat checkpoint.""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } TOP_LEVEL_KEYS = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True else: for key, mapped_key in MAPPING.items(): mapped_key = "unispeech_sat." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key): # special case since naming is very similar continue is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "bias" in name: weight_type = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj weight_type = "weight" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) @torch.no_grad() def convert_unispeech_sat_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ): """ Copy/paste/tweak model's weights to transformers design. 
""" if config_path is not None: config = UniSpeechSatConfig.from_pretrained(config_path) else: config = UniSpeechSatConfig() dict_path = "" if is_finetuned: hf_wav2vec = UniSpeechSatForCTC(config) else: hf_wav2vec = UniSpeechSatForPreTraining(config) model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} ) model = model[0].eval() recursively_load_weights(model, hf_wav2vec) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) args = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
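The weight-renaming loop in the conversion script above replaces the `*` wildcard in a mapped key with the encoder layer index parsed out of the fairseq parameter name. A toy sketch of that substitution using a made-up fairseq name; it only illustrates the pattern, it does not call into the script:

```python
# Example fairseq parameter name and one wildcard target taken from MAPPING.
name = "encoder.layers.3.self_attn.k_proj.weight"
key = "self_attn.k_proj"
mapped_key = "unispeech_sat.encoder.layers.*.attention.k_proj"

if key in name:
    # The layer index sits just before the matched key: "...layers.<idx>.<key>...".
    layer_index = name.split(key)[0].split(".")[-2]
    mapped_key = mapped_key.replace("*", layer_index)
    weight_type = "weight" if "weight" in name else None
    print(mapped_key, weight_type)
    # unispeech_sat.encoder.layers.3.attention.k_proj weight
```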
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/unispeech_sat/modular_unispeech_sat.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_unispeech_sat.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import warnings from collections.abc import Callable from dataclasses import dataclass from typing import Optional, Union import numpy as np import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from ... import initialization as init from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...integrations.fsdp import is_fsdp_managed_module from ...masking_utils import create_bidirectional_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, is_peft_available, logging from .configuration_unispeech_sat import UniSpeechSatConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Output type of [`UniSpeechSatForPreTrainingOutput`], with potential hidden states and attentions. """ ) class UniSpeechSatForPreTrainingOutput(ModelOutput): r""" loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`, *optional*): Prediction scores of the contrastive loss model, i.e. the output of the model before the final softmax. projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. codevector_perplexity (`torch.FloatTensor` of shape `(1,)`): The perplexity of the codevector distribution, used to measure the diversity of the codebook. 
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None projected_states: Optional[torch.FloatTensor] = None projected_quantized_states: Optional[torch.FloatTensor] = None codevector_perplexity: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None class UniSpeechSatSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states class UniSpeechSatPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name="weight", dim=2) if hasattr(self.conv, "parametrizations"): weight_g = self.conv.parametrizations.weight.original0 weight_v = self.conv.parametrizations.weight.original1 else: weight_g = self.conv.weight_g weight_v = self.conv.weight_v deepspeed.zero.register_external_parameter(self, weight_v) deepspeed.zero.register_external_parameter(self, weight_g) else: self.conv = weight_norm(self.conv, name="weight", dim=2) self.padding = UniSpeechSatSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class UniSpeechSatNoLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class UniSpeechSatLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states 
class UniSpeechSatGroupNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class UniSpeechSatFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [UniSpeechSatGroupNormConvLayer(config, layer_id=0)] + [ UniSpeechSatNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ UniSpeechSatLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states class UniSpeechSatFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): if scaling is None: scaling = query.size(-1) ** -0.5 # Take the dot product between "query" and "key" to get the raw attention scores. 
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attention_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class UniSpeechSatAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[UniSpeechSatConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) # get query proj query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) current_states = key_value_states if is_cross_attention else hidden_states key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights, None class UniSpeechSatFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = 
ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class UniSpeechSatEncoderLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention = UniSpeechSatAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class UniSpeechSatEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([UniSpeechSatEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 attention_mask = create_bidirectional_mask( config=self.config, input_embeds=hidden_states, attention_mask=attention_mask, ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if 
skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class UniSpeechSatAttnAdapterLayer(nn.Module): def __init__(self, config): """ Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed up training throughput. """ super().__init__() self.input_dim = config.adapter_attn_dim self.hidden_dim = config.hidden_size self.norm = nn.LayerNorm(self.hidden_dim) self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim) self.act_fn = nn.ReLU() self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim) def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.norm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class UniSpeechSatEncoderLayerStableLayerNorm(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention = UniSpeechSatAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if getattr(config, "adapter_attn_dim", None) is not None: self.adapter_layer = UniSpeechSatAttnAdapterLayer(config) else: self.adapter_layer = None def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) if self.adapter_layer is not None: hidden_states = hidden_states + self.adapter_layer(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class UniSpeechSatEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [UniSpeechSatEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 attention_mask = 
create_bidirectional_mask( config=self.config, input_embeds=hidden_states, attention_mask=attention_mask, ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class UniSpeechSatGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information. """ def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible " f"by `config.num_codevector_groups` {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.hidden_size, self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs, mask=None): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax( hidden_states.float(), tau=self.temperature, hard=True ).type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = 
codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity @auto_docstring class UniSpeechSatPreTrainedModel(PreTrainedModel): config: UniSpeechSatConfig base_model_prefix = "unispeech_sat" main_input_name = "input_values" input_modalities = "audio" supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, UniSpeechSatGumbelVectorQuantizer): init.normal_(module.weight_proj.weight, mean=0.0, std=1) init.zeros_(module.weight_proj.bias) init.uniform_(module.codevectors) elif isinstance(module, UniSpeechSatPositionalConvEmbedding): init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) init.constant_(module.conv.bias, 0) elif isinstance(module, UniSpeechSatFeatureProjection): k = math.sqrt(1 / module.projection.in_features) init.uniform_(module.projection.weight, a=-k, b=k) init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, nn.Conv1d): init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
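The modeling file above defines the full UniSpeechSat stack (convolutional feature encoder, feature projection, transformer encoder). A minimal usage sketch follows; it assumes the `microsoft/unispeech-sat-base` checkpoint referenced in the file's own docstrings and feeds a random waveform in place of real audio, so the values are meaningless but the shapes are representative.

```python
import torch
from transformers import AutoFeatureExtractor, UniSpeechSatModel

# Assumed checkpoint name, taken from the pretraining docstring in the file above.
feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base")
model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base")
model.eval()

# One second of fake 16 kHz audio stands in for a real recording.
waveform = torch.randn(16000).numpy()
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# The feature encoder downsamples roughly 320x, so ~49 frames remain for 1 s of audio.
print(outputs.last_hidden_state.shape)  # e.g. torch.Size([1, 49, 768]) for the base checkpoint
```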
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/unispeech_sat/convert_unispeech_original_s3prl_checkpoint_to_pytorch.py
src/transformers/models/unispeech_sat/convert_unispeech_original_s3prl_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Hubert checkpoint.""" import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, Wav2Vec2FeatureExtractor, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) def convert_classification(base_model_name, hf_config, downstream_dict): model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config) model.projector.weight.data = downstream_dict["projector.weight"] model.projector.bias.data = downstream_dict["projector.bias"] model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"] model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"] return model def convert_diarization(base_model_name, hf_config, downstream_dict): model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config) model.classifier.weight.data = downstream_dict["model.linear.weight"] model.classifier.bias.data = downstream_dict["model.linear.bias"] return model def convert_xvector(base_model_name, hf_config, downstream_dict): model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config) model.projector.weight.data = downstream_dict["connector.weight"] model.projector.bias.data = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel): model.tdnn[i].kernel.weight.data = downstream_dict[ f"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"] model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] model.objective.weight.data = downstream_dict["objective.W"] return model @torch.no_grad() def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path): """ Copy/paste/tweak model's weights to transformers design. 
""" checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=True) downstream_dict = checkpoint["Downstream"] hf_config = UniSpeechSatConfig.from_pretrained(config_path) hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( base_model_name, return_attention_mask=True, do_normalize=False ) arch = hf_config.architectures[0] if arch.endswith("ForSequenceClassification"): hf_model = convert_classification(base_model_name, hf_config, downstream_dict) elif arch.endswith("ForAudioFrameClassification"): hf_model = convert_diarization(base_model_name, hf_config, downstream_dict) elif arch.endswith("ForXVector"): hf_model = convert_xvector(base_model_name, hf_config, downstream_dict) else: raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}") if hf_config.use_weighted_layer_sum: hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(model_dump_path) hf_model.save_pretrained(model_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") args = parser.parse_args() convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
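The S3PRL conversion entry point above can also be driven from Python instead of the CLI. The sketch below mirrors what the `__main__` block does; all file paths are placeholders for illustration, and the Hub id is an assumption.

```python
from transformers.models.unispeech_sat.convert_unispeech_original_s3prl_checkpoint_to_pytorch import (
    convert_s3prl_checkpoint,
)

# Placeholder paths: replace with a real S3PRL downstream checkpoint and config.
convert_s3prl_checkpoint(
    base_model_name="microsoft/unispeech-sat-base",      # pretrained backbone on the Hub (assumed)
    config_path="path/to/classifier_config.json",        # config whose `architectures` picks the head
    checkpoint_path="path/to/s3prl_downstream.ckpt",     # checkpoint containing the "Downstream" dict
    model_dump_path="path/to/converted_model",           # output directory for model + feature extractor
)
```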
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/unispeech_sat/modular_unispeech_sat.py
src/transformers/models/unispeech_sat/modular_unispeech_sat.py
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch UniSpeechSat model.""" import math from dataclasses import dataclass from typing import Optional, Union import torch import torch.nn as nn from ... import initialization as init from ...modeling_outputs import ModelOutput, Wav2Vec2BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ..wav2vec2.modeling_wav2vec2 import ( Wav2Vec2Encoder, Wav2Vec2EncoderStableLayerNorm, Wav2Vec2FeatureEncoder, Wav2Vec2FeatureProjection, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForSequenceClassification, Wav2Vec2ForXVector, Wav2Vec2GumbelVectorQuantizer, Wav2Vec2Model, Wav2Vec2PositionalConvEmbedding, ) from .configuration_unispeech_sat import UniSpeechSatConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 @dataclass @auto_docstring( custom_intro=""" Output type of [`UniSpeechSatForPreTrainingOutput`], with potential hidden states and attentions. """ ) class UniSpeechSatForPreTrainingOutput(ModelOutput): r""" loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`, *optional*): Prediction scores of the contrastive loss model, i.e. the output of the model before the final softmax. projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. codevector_perplexity (`torch.FloatTensor` of shape `(1,)`): The perplexity of the codevector distribution, used to measure the diversity of the codebook. 
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None projected_states: Optional[torch.FloatTensor] = None projected_quantized_states: Optional[torch.FloatTensor] = None codevector_perplexity: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None class UniSpeechSatPositionalConvEmbedding(Wav2Vec2PositionalConvEmbedding): pass class UniSpeechSatFeatureEncoder(Wav2Vec2FeatureEncoder): pass class UniSpeechSatFeatureProjection(Wav2Vec2FeatureProjection): pass class UniSpeechSatEncoder(Wav2Vec2Encoder): pass class UniSpeechSatEncoderStableLayerNorm(Wav2Vec2EncoderStableLayerNorm): pass class UniSpeechSatGumbelVectorQuantizer(Wav2Vec2GumbelVectorQuantizer): def __init__(self, config): super().__init__(config) self.weight_proj = nn.Linear(config.hidden_size, self.num_groups * self.num_vars) @staticmethod def _compute_perplexity(probs, mask=None): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax( hidden_states.float(), tau=self.temperature, hard=True ).type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity @auto_docstring class UniSpeechSatPreTrainedModel(PreTrainedModel): config: UniSpeechSatConfig base_model_prefix = "unispeech_sat" main_input_name = "input_values" input_modalities = "audio" supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, UniSpeechSatGumbelVectorQuantizer): init.normal_(module.weight_proj.weight, mean=0.0, std=1) init.zeros_(module.weight_proj.bias) init.uniform_(module.codevectors) elif isinstance(module, UniSpeechSatPositionalConvEmbedding): init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) init.constant_(module.conv.bias, 0) elif isinstance(module, UniSpeechSatFeatureProjection): k = math.sqrt(1 / module.projection.in_features) 
init.uniform_(module.projection.weight, a=-k, b=k) init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, nn.Conv1d): init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask UniSpeechSatBaseModelOutput = Wav2Vec2BaseModelOutput class UniSpeechSatModel(UniSpeechSatPreTrainedModel, Wav2Vec2Model): def __init__(self, config: UniSpeechSatConfig): UniSpeechSatPreTrainedModel.__init__(self, config) self.config = config self.feature_extractor = UniSpeechSatFeatureEncoder(config) self.feature_projection = UniSpeechSatFeatureProjection(config) self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = UniSpeechSatEncoderStableLayerNorm(config) else: self.encoder = UniSpeechSatEncoder(config) # Initialize weights and apply final processing self.post_init() def freeze_feature_encoder(self): raise AttributeError("Not needed for UniSpeechSat") def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, UniSpeechSatBaseModelOutput]: r""" mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return UniSpeechSatBaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" UniSpeechSat Model with a vector-quantization module and ctc loss for pre-training. """ ) class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel): def __init__(self, config: UniSpeechSatConfig): super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) self.dropout_features = nn.Dropout(config.feat_quantizer_dropout) self.quantizer = UniSpeechSatGumbelVectorQuantizer(config) self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim) self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim) self.dropout = nn.Dropout(config.final_dropout) self.speaker_proj = nn.Linear(config.hidden_size, config.codevector_dim) self.label_embeddings_concat = nn.Parameter(torch.FloatTensor(config.num_clusters, config.codevector_dim)) self.label_embeddings_concat.data.zero_() self.layer_norm_for_extract = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if self.config.do_stable_layer_norm: self.layer_norm_for_extract.requires_grad = False # Initialize weights and apply final processing self.post_init() def set_gumbel_temperature(self, temperature: int): """ Set the Gumbel softmax temperature to a given value. Only necessary for training """ self.quantizer.temperature = temperature def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() @staticmethod def compute_contrastive_logits( target_features: torch.FloatTensor, negative_features: torch.FloatTensor, predicted_features: torch.FloatTensor, temperature: int = 1, ): """ Compute logits for contrastive loss based using cosine similarity as the distance measure between `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied. 
""" target_features = torch.cat([target_features, negative_features], dim=0) logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1) logits = logits.type_as(target_features) # apply temperature logits = logits / temperature return logits @auto_docstring def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, UniSpeechSatForPreTrainingOutput]: r""" Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining >>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base") >>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base") >>> # TODO: Add full pretraining example ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) transformer_features = outputs[0] # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1]) # TODO(PVP) - add pretraining logic and add to tests logits = extract_features loss = quantized_features = codevector_perplexity = None if not return_dict: if loss is not None: return (loss, logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return (logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return UniSpeechSatForPreTrainingOutput( loss=loss, logits=logits, projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class UniSpeechSatForCTC(Wav2Vec2ForCTC): pass class UniSpeechSatForSequenceClassification(Wav2Vec2ForSequenceClassification): pass class UniSpeechSatForAudioFrameClassification(Wav2Vec2ForAudioFrameClassification): pass class UniSpeechSatForXVector(Wav2Vec2ForXVector): pass __all__ = [ "UniSpeechSatForAudioFrameClassification", "UniSpeechSatForCTC", "UniSpeechSatForPreTraining", "UniSpeechSatForSequenceClassification", "UniSpeechSatForXVector", "UniSpeechSatModel", "UniSpeechSatPreTrainedModel", ]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
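`_get_feat_extract_output_lengths` in the modular file above reduces a raw sample count to a frame count by applying the standard 1D-convolution output-length formula once per feature-extractor layer. The standalone sketch below reproduces that arithmetic with the usual Wav2Vec2-style kernel and stride defaults; treat those tuples as an assumption rather than a guarantee for every checkpoint.

```python
# Assumed default feature-extractor geometry (Wav2Vec2-style).
conv_kernel = (10, 3, 3, 3, 3, 2, 2)
conv_stride = (5, 2, 2, 2, 2, 2, 2)


def feat_extract_output_length(input_length: int) -> int:
    """Apply floor((L - kernel) / stride) + 1 for each conv layer in order."""
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        input_length = (input_length - kernel_size) // stride + 1
    return input_length


# One second of 16 kHz audio maps to 49 encoder frames (~20 ms per frame).
print(feat_extract_output_length(16000))  # 49
```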
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/unispeech_sat/__init__.py
src/transformers/models/unispeech_sat/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_unispeech_sat import * from .modeling_unispeech_sat import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
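The `__init__.py` above routes the package through `_LazyModule`, so the modeling code is only imported when an attribute is first accessed from the top-level namespace. A quick illustration, assuming `transformers` is installed:

```python
import transformers

# Nothing under models/unispeech_sat has been imported yet at this point.
# Accessing the class triggers the lazy import machinery and loads the modeling file.
model_cls = transformers.UniSpeechSatModel
print(model_cls.__module__)  # transformers.models.unispeech_sat.modeling_unispeech_sat
```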
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon/configuration_falcon.py
src/transformers/models/falcon/configuration_falcon.py
# coding=utf-8 # Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Falcon configuration""" from typing import Optional from ...configuration_utils import PreTrainedConfig from ...modeling_rope_utils import RopeParameters from ...utils import logging logger = logging.get_logger(__name__) class FalconConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65024): Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`FalconModel`] hidden_size (`int`, *optional*, defaults to 4544): Dimension of the hidden representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 71): Number of attention heads for each attention layer in the Transformer encoder. num_ln_in_parallel_attn (`int`, *optional*): Set to 2 if separate layer norms are to be used for the MLP and the attention output when using parallel attention, otherwise, 1. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for MLP layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for attention layers. num_kv_heads (`int`, *optional*): Number of key-value heads to use per attention layer. If unset, defaults to the same value as `num_attention_heads`. alibi (`bool`, *optional*, defaults to `False`): Whether to use ALiBi positional biases during self-attention. new_decoder_architecture (`bool`, *optional*, defaults to `False`): Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn` arguments are ignored, as the new decoder always uses parallel attention. multi_query (`bool`, *optional*, defaults to `True`): Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`. 
parallel_attn (`bool`, *optional*, defaults to `True`): Whether to compute attention in parallel with the feedforward layer. If False, they are consecutive instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`. bias (`bool`, *optional*, defaults to `False`): Whether to use bias on Linear layers. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with, when `alibi` is `False`. Pretrained Falcon models with RoPE support up to 2048 tokens. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. bos_token_id (`int`, *optional*, defaults to 11): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 11): The id of the "end-of-sequence" token. ffn_hidden_size (`int`, *optional*): The hidden size of the feedforward layer in the Transformer decoder. defaults to 4x hidden dim activation (`str`, *optional*, defaults to `"gelu"`): The activation function used in the feedforward layer. Example: ```python >>> from transformers import FalconModel, FalconConfig >>> # Initializing a small (2-layer) Falcon configuration >>> configuration = FalconConfig(num_hidden_layers=2) >>> # Initializing a model from the small configuration >>> model = FalconModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "falcon" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size: Optional[int] = 65024, hidden_size: Optional[int] = 4544, num_hidden_layers: Optional[int] = 32, num_attention_heads: Optional[int] = 71, num_ln_in_parallel_attn: Optional[int] = None, layer_norm_epsilon: Optional[int] = 1e-5, initializer_range: Optional[float] = 0.02, use_cache: Optional[bool] = True, hidden_dropout: Optional[float] = 0.0, attention_dropout: Optional[float] = 0.0, num_kv_heads: Optional[int] = None, alibi: Optional[bool] = False, new_decoder_architecture: Optional[bool] = False, multi_query: Optional[bool] = True, parallel_attn: Optional[bool] = True, bias: Optional[bool] = False, max_position_embeddings: Optional[int] = 2048, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, bos_token_id: Optional[int] = 11, eos_token_id: Optional[int] = 11, ffn_hidden_size: Optional[int] = None, activation: Optional[str] = "gelu", **kwargs, ): self.vocab_size = vocab_size # Backward compatibility with n_embed kwarg n_embed = kwargs.pop("n_embed", None) self.hidden_size = hidden_size if n_embed is None else n_embed self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads self.alibi = alibi self.new_decoder_architecture = new_decoder_architecture self.multi_query = multi_query # Ignored when new_decoder_architecture is True self.parallel_attn = parallel_attn self.bias = bias self.num_ln_in_parallel_attn = num_ln_in_parallel_attn self.max_position_embeddings = 
max_position_embeddings self.activation = activation if ffn_hidden_size is None: self.ffn_hidden_size = hidden_size * 4 else: self.ffn_hidden_size = ffn_hidden_size self.rope_parameters = rope_parameters super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @property def head_dim(self): return self.hidden_size // self.num_attention_heads @property def rotary(self): return not self.alibi __all__ = ["FalconConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
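Several derived quantities in the Falcon configuration follow directly from the arguments documented above: `num_kv_heads` falls back to `num_attention_heads`, `ffn_hidden_size` defaults to four times the hidden size, `head_dim` is the hidden size divided by the number of heads, and `rotary` is simply the negation of `alibi`. The short sketch below checks those defaults on a small configuration whose numbers are illustrative, not a released checkpoint.

```python
from transformers import FalconConfig

# A small illustrative configuration, not a released checkpoint.
config = FalconConfig(hidden_size=512, num_attention_heads=8, num_hidden_layers=2)

assert config.num_kv_heads == 8          # falls back to num_attention_heads
assert config.ffn_hidden_size == 2048    # defaults to 4 * hidden_size
assert config.head_dim == 64             # hidden_size // num_attention_heads
assert config.rotary is True             # RoPE is used whenever alibi is False
```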
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon/convert_custom_code_checkpoint.py
src/transformers/models/falcon/convert_custom_code_checkpoint.py
import json from argparse import ArgumentParser from pathlib import Path """ This script converts Falcon custom code checkpoints to modern Falcon checkpoints that use code in the Transformers library. After conversion, performance (especially for generation) should improve and the checkpoint can be loaded without needing trust_remote_code=True. """ if __name__ == "__main__": parser = ArgumentParser() parser.add_argument( "--checkpoint_dir", type=Path, required=True, help="Directory containing a custom code checkpoint to convert to a modern Falcon checkpoint.", ) args = parser.parse_args() if not args.checkpoint_dir.is_dir(): raise ValueError("--checkpoint_dir argument should be a directory!") if ( not (args.checkpoint_dir / "configuration_RW.py").is_file() or not (args.checkpoint_dir / "modelling_RW.py").is_file() ): raise ValueError( "The model directory should contain configuration_RW.py and modelling_RW.py files! Are you sure this is a custom code checkpoint?" ) (args.checkpoint_dir / "configuration_RW.py").unlink() (args.checkpoint_dir / "modelling_RW.py").unlink() config = args.checkpoint_dir / "config.json" text = config.read_text() text = text.replace("RWForCausalLM", "FalconForCausalLM") text = text.replace("RefinedWebModel", "falcon") text = text.replace("RefinedWeb", "falcon") json_config = json.loads(text) del json_config["auto_map"] if "n_head" in json_config: json_config["num_attention_heads"] = json_config.pop("n_head") if "n_layer" in json_config: json_config["num_hidden_layers"] = json_config.pop("n_layer") if "n_head_kv" in json_config: json_config["num_kv_heads"] = json_config.pop("n_head_kv") json_config["new_decoder_architecture"] = True else: json_config["new_decoder_architecture"] = False bos_token_id = json_config.get("bos_token_id", 1) eos_token_id = json_config.get("eos_token_id", 2) config.unlink() config.write_text(json.dumps(json_config, indent=2, sort_keys=True)) tokenizer_config = args.checkpoint_dir / "tokenizer_config.json" if tokenizer_config.is_file(): text = tokenizer_config.read_text() json_config = json.loads(text) if json_config["tokenizer_class"] == "PreTrainedTokenizerFast": json_config["model_input_names"] = ["input_ids", "attention_mask"] tokenizer_config.unlink() tokenizer_config.write_text(json.dumps(json_config, indent=2, sort_keys=True)) generation_config_path = args.checkpoint_dir / "generation_config.json" generation_dict = { "_from_model_config": True, "bos_token_id": bos_token_id, "eos_token_id": eos_token_id, "transformers_version": "4.33.0.dev0", } generation_config_path.write_text(json.dumps(generation_dict, indent=2, sort_keys=True)) print("Done! Please double-check that the new checkpoint works as expected.")
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
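The custom-code conversion script above rewrites a checkpoint's `config.json` in place. The sketch below isolates its key-renaming step on an in-memory dictionary so the mapping is easy to follow; the input values are made up for illustration.

```python
# Illustrative custom-code config fields (values are made up).
json_config = {
    "model_type": "RefinedWeb",
    "n_head": 71,
    "n_layer": 32,
    "n_head_kv": 8,
}

# Same renames the conversion script applies to the on-disk config.json.
if "n_head" in json_config:
    json_config["num_attention_heads"] = json_config.pop("n_head")
if "n_layer" in json_config:
    json_config["num_hidden_layers"] = json_config.pop("n_layer")
if "n_head_kv" in json_config:
    json_config["num_kv_heads"] = json_config.pop("n_head_kv")
    json_config["new_decoder_architecture"] = True
else:
    json_config["new_decoder_architecture"] = False

print(json_config)
# {'model_type': 'RefinedWeb', 'num_attention_heads': 71, 'num_hidden_layers': 32,
#  'num_kv_heads': 8, 'new_decoder_architecture': True}
```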
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon/__init__.py
src/transformers/models/falcon/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_falcon import * from .modeling_falcon import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
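The `__init__.py` above routes all imports through `_LazyModule` so that heavy submodules are only imported when actually used. As a rough illustration of the underlying idea (not the real `_LazyModule` implementation), a package `__init__.py` can resolve names lazily with a module-level `__getattr__`; the attribute table below is a simplified assumption rather than the output of `define_import_structure`:

```python
# Sketch of PEP 562-style lazy imports for a package __init__.py.
# The real _LazyModule builds its table from define_import_structure(_file).
import importlib

_LAZY_ATTRS = {
    "FalconConfig": ".configuration_falcon",
    "FalconModel": ".modeling_falcon",
    "FalconForCausalLM": ".modeling_falcon",
}


def __getattr__(name):
    # Import the submodule only on first attribute access, then forward the lookup.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```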
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon/modeling_falcon.py
src/transformers/models/falcon/modeling_falcon.py
# coding=utf-8 # Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Falcon model.""" import math from collections.abc import Callable from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from torch.nn import functional as F from ... import initialization as init from ...activations import get_activation from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( AttentionMaskConverter, ) from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_rope_utils import ( ROPE_INIT_FUNCTIONS, dynamic_rope_update, ) from ...modeling_utils import PreTrainedModel from ...utils import ( auto_docstring, logging, ) from ...utils.generic import maybe_autocast from .configuration_falcon import FalconConfig if is_flash_attn_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) # NOTE(Hesslow): Unfortunately we did not fuse matmul and bias during training, this means that there's one additional quantization to bfloat16 between the operations. # In order not to degrade the quality of our HF-port, we keep these characteristics in the final model. class FalconLinear(nn.Linear): def forward(self, input: torch.Tensor) -> torch.Tensor: hidden_states = input @ self.weight.T if self.bias is None: return hidden_states return hidden_states + self.bias # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. 
Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Falcon class FalconRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: FalconConfig, device=None): super().__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False) @staticmethod def compute_default_rope_parameters( config: Optional[FalconConfig] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). """ base = config.rope_parameters["rope_theta"] dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with maybe_autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor( 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor( 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) # Note: alibi will added to the attention bias that will be applied to the query, key product of attention # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length) # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) # => the query_length dimension will then be broadcasted correctly # This is more or less identical to T5's relative position bias: # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None].bfloat16() * arange_tensor return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) # Copied from transformers.models.bloom.modeling_bloom.dropout_add def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: """ Dropout add function Args: x (`torch.tensor`): input tensor residual (`torch.tensor`): residual tensor prob (`float`): dropout probability training (`bool`): training mode """ out = F.dropout(x, p=prob, training=training) out = residual + out return out class FalconAttention(nn.Module): def __init__(self, config: FalconConfig, layer_idx=None): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout self.max_position_embeddings = config.max_position_embeddings self.is_causal = True self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." 
) if self.head_dim * self.num_heads != self.hidden_size: raise ValueError( f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:" f" {self.num_heads})." ) # Layer-wise attention scaling self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) self.beta = self.inv_norm_factor if config.new_decoder_architecture: qkv_out_dim = (config.num_kv_heads * 2 + config.num_attention_heads) * self.head_dim elif config.multi_query: qkv_out_dim = self.hidden_size + 2 * self.head_dim else: qkv_out_dim = 3 * self.hidden_size self.query_key_value = FalconLinear(self.hidden_size, qkv_out_dim, bias=config.bias) self.new_decoder_architecture = config.new_decoder_architecture self.multi_query = config.multi_query self.dense = FalconLinear(self.hidden_size, self.hidden_size, bias=config.bias) self.attention_dropout = nn.Dropout(config.attention_dropout) self.num_kv_heads = config.num_kv_heads if (self.new_decoder_architecture or not self.multi_query) else 1 def _split_heads(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Split the last dimension into (num_heads, head_dim), results share same memory storage as `fused_qkv` Args: fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim] Returns: query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] value: [batch_size, seq_length, num_heads, head_dim] """ if self.new_decoder_architecture: batch, seq_len, _ = fused_qkv.shape qkv = fused_qkv.view(batch, seq_len, -1, self.num_heads // self.num_kv_heads + 2, self.head_dim) query = qkv[:, :, :, :-2] key = qkv[:, :, :, [-2]] value = qkv[:, :, :, [-1]] key = torch.broadcast_to(key, query.shape) value = torch.broadcast_to(value, query.shape) query, key, value = [x.flatten(2, 3) for x in (query, key, value)] return query, key, value elif not self.multi_query: batch_size, seq_length, three_times_hidden_size = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :] else: batch_size, seq_length, three_times_hidden_size = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads + 2, self.head_dim) return fused_qkv[..., :-2, :], fused_qkv[..., [-2], :], fused_qkv[..., [-1], :] # Copied from transformers.models.bloom.modeling_bloom.BloomAttention._merge_heads def _merge_heads(self, x: torch.Tensor) -> torch.Tensor: """ Merge heads together over the last dimension Args: x (`torch.tensor`): [batch_size * num_heads, seq_length, head_dim] Returns: torch.tensor: [batch_size, seq_length, num_heads * head_dim] """ # What we want to achieve is: # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim batch_size_and_num_heads, seq_length, _ = x.shape batch_size = batch_size_and_num_heads // self.num_heads # First view to decompose the batch size # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim x = x.view(batch_size, self.num_heads, seq_length, self.head_dim) # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim x = x.permute(0, 2, 1, 3) # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim) def forward( self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: 
Optional[torch.LongTensor] = None, layer_past: Optional[Cache] = None, use_cache: bool = False, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, ): fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads # 3 x [batch_size, seq_length, num_heads, head_dim] (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) batch_size, query_length, _, _ = query_layer.shape query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim) key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) if alibi is None: cos, sin = position_embeddings query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin) if layer_past is not None: cache_kwargs = {"cache_position": cache_position} if alibi is None: cache_kwargs.update({"sin": sin, "cos": cos}) key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) kv_length = key_layer.shape[-2] if ( self.config._attn_implementation == "sdpa" and query_layer.device.type == "cuda" and attention_mask is not None ): # For torch<=2.1.2, SDPA with memory-efficient backend is bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. query_layer = query_layer.contiguous() key_layer = key_layer.contiguous() value_layer = value_layer.contiguous() if attention_mask is not None: attention_mask = attention_mask[:, :, :, : key_layer.shape[-2]] if alibi is None: if self.config._attn_implementation == "sdpa" and not output_attentions: # We dispatch to SDPA's Flash Attention or Efficient kernels via this if statement instead of an # inline conditional assignment to support both torch.compile's `dynamic=True` and `fullgraph=True` # The query_length > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not # create a causal mask in case query_length == 1. is_causal = self.is_causal and attention_mask is None and query_length > 1 attn_output = torch.nn.functional.scaled_dot_product_attention( query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=0.0, is_causal=is_causal, ) attention_scores = None else: attention_scores = query_layer @ key_layer.transpose(-1, -2) attention_scores /= math.sqrt(self.head_dim) attention_scores = F.softmax(attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype) # It is unclear why dropout is not applied here (while it is with alibi). 
attn_output = attention_scores @ value_layer attn_output = attn_output.view(batch_size, self.num_heads, query_length, self.head_dim) attn_output = attn_output.permute(0, 2, 1, 3) attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_output) return attn_output, attention_scores else: if self.config._attn_implementation == "sdpa" and not output_attentions: # We dispatch to SDPA's Flash Attention or Efficient kernels via this if statement instead of an # inline conditional assignment to support both torch.compile's `dynamic=True` and `fullgraph=True` is_causal = self.is_causal and attention_mask is None and query_length > 1 attn_output = torch.nn.functional.scaled_dot_product_attention( query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.attention_dropout.p if self.training else 0.0, is_causal=is_causal, ) attention_probs = None attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_output) else: matmul_result = query_layer @ key_layer.transpose(-1, -2) # change view to [batch_size, num_heads, q_length, kv_length] attention_scores = matmul_result.view(batch_size, self.num_heads, query_length, kv_length) # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length] input_dtype = attention_scores.dtype # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38` if input_dtype == torch.float16 or input_dtype == torch.bfloat16: attention_scores = attention_scores.to(torch.float32) attention_logits = attention_scores + alibi.view(batch_size, self.num_heads, 1, -1) attention_logits *= self.inv_norm_factor attention_probs = F.softmax(attention_logits + attention_mask, dim=-1, dtype=hidden_states.dtype) # [batch_size, num_heads, q_length, kv_length] attention_probs = self.attention_dropout(attention_probs) # change view [batch_size, num_heads, q_length, kv_length] attention_probs_reshaped = attention_probs.view(batch_size, self.num_heads, query_length, kv_length) # matmul: [batch_size * num_heads, q_length, head_dim] attn_output = (attention_probs_reshaped @ value_layer).flatten(0, 1) # change view [batch_size, q_length, num_heads * head_dim] attn_output = self._merge_heads(attn_output) attn_output = self.dense(attn_output) return attn_output, attention_probs class FalconFlashAttention2(FalconAttention): """ Falcon flash attention module. This module inherits from `FalconAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() def forward( self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, layer_past: Optional[Cache] = None, use_cache: bool = False, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, ): fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads # 3 x [batch_size, seq_length, num_heads, head_dim] (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) batch_size, query_length, _, _ = query_layer.shape query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim) key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) if alibi is None: cos, sin = position_embeddings query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin) if layer_past is not None: cache_kwargs = {"cache_position": cache_position} if alibi is None: cache_kwargs.update({"sin": sin, "cos": cos}) key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_layer = query_layer.transpose(1, 2) key_layer = key_layer.transpose(1, 2) value_layer = value_layer.transpose(1, 2) if alibi is not None: raise ValueError("`alibi` is not supported when `use_flash_attn` is True") attn_dropout = self.config.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_layer.dtype device_type = query_layer.device.type if query_layer.device.type != "mps" else "cpu" if input_dtype == torch.float32: if torch.is_autocast_enabled(): # NOTE: `torch.get_autocast_dtype` is there starting from PyTorch 2.4 target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) # Handle the case where the model is quantized elif hasattr(self.config, "quantization_config"): target_dtype = self.config.dtype else: target_dtype = self.query_key_value.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." 
) query_layer = query_layer.to(target_dtype) key_layer = key_layer.to(target_dtype) value_layer = value_layer.to(target_dtype) attn_output = _flash_attention_forward( query_layer, key_layer, value_layer, attention_mask, query_length, position_ids=position_ids, dropout=attn_dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_weights = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_weights) if not output_attentions: attn_weights = None return attn_output, attn_weights class FalconMLP(nn.Module): def __init__(self, config: FalconConfig): super().__init__() hidden_size = config.hidden_size self.dense_h_to_4h = FalconLinear(hidden_size, config.ffn_hidden_size, bias=config.bias) self.act = get_activation(config.activation) self.dense_4h_to_h = FalconLinear(config.ffn_hidden_size, hidden_size, bias=config.bias) self.hidden_dropout = config.hidden_dropout def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.act(self.dense_h_to_4h(x)) x = self.dense_4h_to_h(x) return x FALCON_ATTENTION_CLASSES = { "eager": FalconAttention, "sdpa": FalconAttention, # FalconAttention originally implemented both a forward with & without SDPA "flash_attention_2": FalconFlashAttention2, } class FalconDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: FalconConfig, layer_idx=None): super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.self_attention = FALCON_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.mlp = FalconMLP(config) self.hidden_dropout = config.hidden_dropout self.config = config if config.num_ln_in_parallel_attn is None and config.new_decoder_architecture: config.num_ln_in_parallel_attn = 2 if not config.parallel_attn: self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) else: if config.num_ln_in_parallel_attn == 2: # The layer norm before self-attention self.ln_attn = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) # The layer norm before the MLP self.ln_mlp = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) else: self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) def forward( self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, layer_past: Optional[Union[Cache, tuple[torch.Tensor, torch.Tensor]]] = None, use_cache: bool = False, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs, ): residual = hidden_states if self.config.new_decoder_architecture and self.config.num_ln_in_parallel_attn == 2: attention_layernorm_out = self.ln_attn(hidden_states) mlp_layernorm_out = self.ln_mlp(hidden_states) else: attention_layernorm_out = self.input_layernorm(hidden_states) # Self attention. 
attention_output, attn_weights = self.self_attention( attention_layernorm_out, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, alibi=alibi, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings, ) if not self.config.new_decoder_architecture: if self.config.parallel_attn: mlp_layernorm_out = attention_layernorm_out else: residual = dropout_add( attention_output, residual, self.config.attention_dropout, training=self.training ) mlp_layernorm_out = self.post_attention_layernorm(residual) if ( self.config.new_decoder_architecture and self.config.parallel_attn and self.config.num_ln_in_parallel_attn == 1 ): mlp_layernorm_out = attention_layernorm_out # MLP. mlp_output = self.mlp(mlp_layernorm_out) if self.config.new_decoder_architecture or self.config.parallel_attn: mlp_output += attention_output output = dropout_add(mlp_output, residual, self.config.hidden_dropout, training=self.training) return output, attn_weights @auto_docstring class FalconPreTrainedModel(PreTrainedModel): config: FalconConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["FalconDecoderLayer"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True @torch.no_grad() def _init_weights(self, module: nn.Module): """Initialize the weights.""" super()._init_weights(module) if isinstance(module, FalconLinear): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) # Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa @classmethod def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False): _is_bettertransformer = getattr(cls, "use_bettertransformer", False) if _is_bettertransformer: return config if not hard_check_only: config._attn_implementation = "sdpa" return config @auto_docstring class FalconModel(FalconPreTrainedModel): def __init__(self, config: FalconConfig):
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
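The `apply_rotary_pos_emb` docstring in the file above explains how `unsqueeze_dim` makes the `[batch, seq, head_dim]` cos/sin tables broadcast against `[batch, heads, seq, head_dim]` queries and keys. A self-contained sketch (helpers mirrored from the source, tensor sizes chosen arbitrarily) shows the shapes working out:

```python
import torch


def rotate_half(x):
    # Rotates half the hidden dims of the input, as in the modeling file.
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    cos, sin = cos.unsqueeze(unsqueeze_dim), sin.unsqueeze(unsqueeze_dim)
    return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)


batch, heads, seq, head_dim = 2, 4, 8, 16            # arbitrary example sizes
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)
cos = torch.randn(batch, seq, head_dim)              # [batch, seq, head_dim], per the docstring
sin = torch.randn(batch, seq, head_dim)

# unsqueeze_dim=1 inserts the heads axis, so cos/sin broadcast over every head
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1)
print(q_rot.shape, k_rot.shape)  # torch.Size([2, 4, 8, 16]) for both
```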
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert_japanese/tokenization_bert_japanese.py
src/transformers/models/bert_japanese/tokenization_bert_japanese.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" import collections import copy import os import unicodedata from typing import Any, Optional from ...tokenization_python import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import is_sentencepiece_available, is_sudachi_projection_available, logging if is_sentencepiece_available(): import sentencepiece as spm else: spm = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "spm_file": "spiece.model"} SPIECE_UNDERLINE = "▁" def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertJapaneseTokenizer(PreTrainedTokenizer): r""" Construct a BERT tokenizer for Japanese text. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to: this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to a one-wordpiece-per-line vocabulary file. spm_file (`str`, *optional*): Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm or .model extension) that contains the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether to lower case the input. Only has an effect when do_basic_tokenize=True. do_word_tokenize (`bool`, *optional*, defaults to `True`): Whether to do word tokenization. do_subword_tokenize (`bool`, *optional*, defaults to `True`): Whether to do subword tokenization. word_tokenizer_type (`str`, *optional*, defaults to `"basic"`): Type of word tokenizer. Choose from ["basic", "mecab", "sudachi", "jumanpp"]. subword_tokenizer_type (`str`, *optional*, defaults to `"wordpiece"`): Type of subword tokenizer. Choose from ["wordpiece", "character", "sentencepiece",]. mecab_kwargs (`dict`, *optional*): Dictionary passed to the `MecabTokenizer` constructor. sudachi_kwargs (`dict`, *optional*): Dictionary passed to the `SudachiTokenizer` constructor. jumanpp_kwargs (`dict`, *optional*): Dictionary passed to the `JumanppTokenizer` constructor. 
""" vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, spm_file=None, do_lower_case=False, do_word_tokenize=True, do_subword_tokenize=True, word_tokenizer_type="basic", subword_tokenizer_type="wordpiece", never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", mecab_kwargs=None, sudachi_kwargs=None, jumanpp_kwargs=None, **kwargs, ): if subword_tokenizer_type == "sentencepiece": if not os.path.isfile(spm_file): raise ValueError( f"Can't find a vocabulary file at path '{spm_file}'. To load the vocabulary from a Google" " pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.spm_file = spm_file else: if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google" " pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_word_tokenize = do_word_tokenize self.word_tokenizer_type = word_tokenizer_type self.lower_case = do_lower_case self.never_split = never_split self.mecab_kwargs = copy.deepcopy(mecab_kwargs) self.sudachi_kwargs = copy.deepcopy(sudachi_kwargs) self.jumanpp_kwargs = copy.deepcopy(jumanpp_kwargs) if do_word_tokenize: if word_tokenizer_type == "basic": self.word_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False ) elif word_tokenizer_type == "mecab": self.word_tokenizer = MecabTokenizer( do_lower_case=do_lower_case, never_split=never_split, **(mecab_kwargs or {}) ) elif word_tokenizer_type == "sudachi": self.word_tokenizer = SudachiTokenizer( do_lower_case=do_lower_case, never_split=never_split, **(sudachi_kwargs or {}) ) elif word_tokenizer_type == "jumanpp": self.word_tokenizer = JumanppTokenizer( do_lower_case=do_lower_case, never_split=never_split, **(jumanpp_kwargs or {}) ) else: raise ValueError(f"Invalid word_tokenizer_type '{word_tokenizer_type}' is specified.") self.do_subword_tokenize = do_subword_tokenize self.subword_tokenizer_type = subword_tokenizer_type if do_subword_tokenize: if subword_tokenizer_type == "wordpiece": self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) elif subword_tokenizer_type == "character": self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=str(unk_token)) elif subword_tokenizer_type == "sentencepiece": self.subword_tokenizer = SentencepieceTokenizer(vocab=self.spm_file, unk_token=str(unk_token)) else: raise ValueError(f"Invalid subword_tokenizer_type '{subword_tokenizer_type}' is specified.") super().__init__( spm_file=spm_file, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, do_lower_case=do_lower_case, do_word_tokenize=do_word_tokenize, do_subword_tokenize=do_subword_tokenize, word_tokenizer_type=word_tokenizer_type, subword_tokenizer_type=subword_tokenizer_type, never_split=never_split, mecab_kwargs=mecab_kwargs, sudachi_kwargs=sudachi_kwargs, jumanpp_kwargs=jumanpp_kwargs, token_type_ids_pattern="bert_style", token_type_ids_include_special_tokens=True, special_tokens_pattern="cls_sep", **kwargs, ) @property def do_lower_case(self): return self.lower_case def __getstate__(self): state = dict(self.__dict__) if self.word_tokenizer_type in ["mecab", "sudachi", "jumanpp"]: del state["word_tokenizer"] 
return state def __setstate__(self, state): self.__dict__ = state if self.word_tokenizer_type == "mecab": self.word_tokenizer = MecabTokenizer( do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.mecab_kwargs or {}) ) elif self.word_tokenizer_type == "sudachi": self.word_tokenizer = SudachiTokenizer( do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.sudachi_kwargs or {}) ) elif self.word_tokenizer_type == "jumanpp": self.word_tokenizer = JumanppTokenizer( do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.jumanpp_kwargs or {}) ) def _tokenize(self, text): if self.do_word_tokenize: tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens) else: tokens = [text] if self.do_subword_tokenize: split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)] else: split_tokens = tokens return split_tokens @property def vocab_size(self): if self.subword_tokenizer_type == "sentencepiece": return len(self.subword_tokenizer.sp_model) return len(self.vocab) def get_vocab(self): if self.subword_tokenizer_type == "sentencepiece": vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab # base vocab vocab = dict(self.vocab) # + added_tokens_encoder (only for tokens not in base vocab) for token, index in self.added_tokens_encoder.items(): if token not in self.vocab: vocab[token] = index return vocab def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if self.subword_tokenizer_type == "sentencepiece": return self.subword_tokenizer.sp_model.PieceToId(token) return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if self.subword_tokenizer_type == "sentencepiece": return self.subword_tokenizer.sp_model.IdToPiece(index) return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" if self.subword_tokenizer_type == "sentencepiece": return self.subword_tokenizer.sp_model.decode(tokens) out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if os.path.isdir(save_directory): if self.subword_tokenizer_type == "sentencepiece": vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["spm_file"] ) else: vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"], ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory if self.subword_tokenizer_type == "sentencepiece": with open(vocab_file, "wb") as writer: content_spiece_model = self.subword_tokenizer.sp_model.serialized_model_proto() writer.write(content_spiece_model) else: with open(vocab_file, "w", encoding="utf-8") as writer: index = 0 for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) class MecabTokenizer: """Runs basic tokenization with MeCab morphological parser.""" def __init__( self, do_lower_case=False, never_split=None, normalize_text=True, mecab_dic: Optional[str] = "unidic_lite", mecab_option: Optional[str] = None, ): """ Constructs a MecabTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **mecab_dic**: (*optional*) string (default "ipadic") Name of dictionary to be used for MeCab initialization. If you are using a system-installed dictionary, set this option to `None` and modify *mecab_option*. **mecab_option**: (*optional*) string String passed to MeCab constructor. """ self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text try: import fugashi except ModuleNotFoundError as error: raise error.__class__( "You need to install fugashi to use MecabTokenizer. " "See https://pypi.org/project/fugashi/ for installation." ) mecab_option = mecab_option or "" if mecab_dic is not None: if mecab_dic == "ipadic": try: import ipadic except ModuleNotFoundError as error: raise error.__class__( "The ipadic dictionary is not installed. " "See https://github.com/polm/ipadic-py for installation." ) dic_dir = ipadic.DICDIR elif mecab_dic == "unidic_lite": try: import unidic_lite except ModuleNotFoundError as error: raise error.__class__( "The unidic_lite dictionary is not installed. " "See https://github.com/polm/unidic-lite for installation." ) dic_dir = unidic_lite.DICDIR elif mecab_dic == "unidic": try: import unidic except ModuleNotFoundError as error: raise error.__class__( "The unidic dictionary is not installed. " "See https://github.com/polm/unidic-py for installation." ) dic_dir = unidic.DICDIR if not os.path.isdir(dic_dir): raise RuntimeError( "The unidic dictionary itself is not found. " "See https://github.com/polm/unidic-py for installation." ) else: raise ValueError("Invalid mecab_dic is specified.") mecabrc = os.path.join(dic_dir, "mecabrc") mecab_option = f'-d "{dic_dir}" -r "{mecabrc}" ' + mecab_option self.mecab = fugashi.GenericTagger(mecab_option) def tokenize(self, text, never_split=None, **kwargs): """Tokenizes a piece of text.""" if self.normalize_text: text = unicodedata.normalize("NFKC", text) never_split = self.never_split + (never_split if never_split is not None else []) tokens = [] for word in self.mecab(text): token = word.surface if self.do_lower_case and token not in never_split: token = token.lower() tokens.append(token) return tokens class SudachiTokenizer: """Runs basic tokenization with Sudachi morphological parser.""" def __init__( self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False, sudachi_split_mode="A", sudachi_config_path=None, sudachi_resource_dir=None, sudachi_dict_type="core", sudachi_projection=None, ): """ Constructs a SudachiTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens. **sudachi_split_mode**: (*optional*) string Split mode of sudachi, choose from `["A", "B", "C"]`. **sudachi_config_path**: (*optional*) string **sudachi_resource_dir**: (*optional*) string **sudachi_dict_type**: (*optional*) string dict type of sudachi, choose from `["small", "core", "full"]`. **sudachi_projection**: (*optional*) string Word projection mode of sudachi, choose from `["surface", "normalized", "reading", "dictionary", "dictionary_and_surface", "normalized_and_surface", "normalized_nouns"]`. """ self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text self.trim_whitespace = trim_whitespace try: from sudachipy import dictionary, tokenizer except ImportError: raise ImportError( "You need to install sudachipy to use SudachiTokenizer. " "See https://github.com/WorksApplications/SudachiPy for installation." ) if sudachi_split_mode == "A": self.split_mode = tokenizer.Tokenizer.SplitMode.A elif sudachi_split_mode == "B": self.split_mode = tokenizer.Tokenizer.SplitMode.B elif sudachi_split_mode == "C": self.split_mode = tokenizer.Tokenizer.SplitMode.C else: raise ValueError("Invalid sudachi_split_mode is specified.") self.projection = sudachi_projection sudachi_dictionary = dictionary.Dictionary( config_path=sudachi_config_path, resource_dir=sudachi_resource_dir, dict=sudachi_dict_type ) if is_sudachi_projection_available(): self.sudachi = sudachi_dictionary.create(self.split_mode, projection=self.projection) elif self.projection is not None: raise ImportError("You need to install sudachipy>=0.6.8 to specify `projection` field in sudachi_kwargs.") else: self.sudachi = sudachi_dictionary.create(self.split_mode) def tokenize(self, text, never_split=None, **kwargs): """Tokenizes a piece of text.""" if self.normalize_text: text = unicodedata.normalize("NFKC", text) never_split = self.never_split + (never_split if never_split is not None else []) tokens = [] for word in self.sudachi.tokenize(text): token = word.surface() if self.do_lower_case and token not in never_split: token = token.lower() if self.trim_whitespace: if token.strip() == "": continue else: token = token.strip() tokens.append(token) return tokens class JumanppTokenizer: """Runs basic tokenization with jumanpp morphological parser.""" def __init__( self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False, ): """ Constructs a JumanppTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens. 
""" self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text self.trim_whitespace = trim_whitespace try: import rhoknp except ImportError: raise ImportError( "You need to install rhoknp to use JumanppTokenizer. " "See https://github.com/ku-nlp/rhoknp for installation." ) self.juman = rhoknp.Jumanpp() def tokenize(self, text, never_split=None, **kwargs): """Tokenizes a piece of text.""" if self.normalize_text: text = unicodedata.normalize("NFKC", text) text = text.strip() never_split = self.never_split + (never_split if never_split is not None else []) tokens = [] for mrph in self.juman.apply_to_sentence(text).morphemes: token = mrph.text if self.do_lower_case and token not in never_split: token = token.lower() if self.trim_whitespace: if token.strip() == "": continue else: token = token.strip() tokens.append(token) return tokens class CharacterTokenizer: """Runs Character tokenization.""" def __init__(self, vocab, unk_token, normalize_text=True): """ Constructs a CharacterTokenizer. Args: **vocab**: Vocabulary object. **unk_token**: str A special symbol for out-of-vocabulary token. **normalize_text**: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization. """ self.vocab = vocab self.unk_token = unk_token self.normalize_text = normalize_text def tokenize(self, text): """ Tokenizes a piece of text into characters. For example, `input = "apple""` will return as output `["a", "p", "p", "l", "e"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of characters. """ if self.normalize_text: text = unicodedata.normalize("NFKC", text) output_tokens = [] for char in text: if char not in self.vocab: output_tokens.append(self.unk_token) continue output_tokens.append(char) return output_tokens class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. 
Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F) or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) ): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer: """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad:
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
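The `WordpieceTokenizer.tokenize` docstring above describes a greedy longest-match-first loop and gives `"unaffable" -> ["un", "##aff", "##able"]` as its example. A stripped-down sketch of that loop with a toy vocabulary (the vocabulary is hypothetical, purely for illustration):

```python
# Minimal greedy longest-match-first WordPiece loop, mirroring the logic in the tokenizer above.
vocab = {"un", "##aff", "##able"}


def wordpiece(token, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        cur = None
        while start < end:
            sub = token[start:end]
            if start > 0:
                sub = "##" + sub        # continuation pieces carry the "##" prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1                    # shrink the candidate until it matches
        if cur is None:
            return [unk]                # no piece matched: the whole token becomes [UNK]
        pieces.append(cur)
        start = end
    return pieces


print(wordpiece("unaffable", vocab))    # ['un', '##aff', '##able']
```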
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert_japanese/__init__.py
src/transformers/models/bert_japanese/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .tokenization_bert_japanese import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py
src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py
# coding=utf-8 # Copyright 2023 The BigCode team and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GPTBigCode configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class GPTBigCodeConfig(PreTrainedConfig): """ This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTBigCode [gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50257): Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTBigCodeModel`]. n_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_embd (`int`, *optional*, defaults to 768): Dimensionality of the embeddings and hidden states. n_layer (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. n_inner (`int`, *optional*, defaults to None): Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`): Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new", "gelu_pytorch_tanh"]`. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_attn_weights (`bool`, *optional*, defaults to `True`): Scale attention weights by dividing by sqrt(hidden_size).. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`): Whether to call the fused softmax in float32. scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`): Whether to scale the attention softmax in float32. 
multi_query (`bool`, *optional*, defaults to `True`): Whether to use Multi-Query Attention (`True`) or Multi-Head Attention (`False`). Example: ```python >>> from transformers import GPTBigCodeConfig, GPTBigCodeModel >>> # Initializing a GPTBigCode configuration >>> configuration = GPTBigCodeConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = GPTBigCodeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "gpt_bigcode" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs, ): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.scale_attn_weights = scale_attn_weights self.use_cache = use_cache self.attention_softmax_in_fp32 = attention_softmax_in_fp32 self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32 self.multi_query = multi_query self.num_key_value_heads = 1 if multi_query else n_head self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) __all__ = ["GPTBigCodeConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
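The configuration above keeps the GPT-2-style hyperparameter names (`n_embd`, `n_head`, `n_layer`, `n_positions`) and exposes the usual aliases through `attribute_map`, while `num_key_value_heads` is derived from the `multi_query` flag. A minimal sketch of how this behaves, assuming a recent `transformers` installation; the printed values follow from the defaults shown in the file:

```python
from transformers import GPTBigCodeConfig

# Multi-Query Attention (the default): a single key/value head is shared by all query heads.
mqa_config = GPTBigCodeConfig(n_embd=768, n_head=12, multi_query=True)
print(mqa_config.hidden_size)          # 768, alias of `n_embd` via `attribute_map`
print(mqa_config.num_attention_heads)  # 12, alias of `n_head`
print(mqa_config.num_key_value_heads)  # 1, because `multi_query=True`

# Multi-Head Attention: every attention head gets its own key/value head.
mha_config = GPTBigCodeConfig(multi_query=False)
print(mha_config.num_key_value_heads)  # 12, equal to `n_head`
```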
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py
src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py
# coding=utf-8 # Copyright 2023 The Bigcode team and HuggingFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch GPTBigCode model.""" import math from collections.abc import Callable from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils import ( auto_docstring, can_return_tuple, logging, ) from .configuration_gpt_bigcode import GPTBigCodeConfig logger = logging.get_logger(__name__) # Fused kernels # Use separate functions for each case because conditionals prevent kernel fusion. # TODO: Could have better fused kernels depending on scaling and dropout. # Is it doable without writing 32 functions? @torch.jit.script def upcast_masked_softmax( x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor, scale: float, softmax_dtype: torch.dtype ): input_dtype = x.dtype x = x.to(softmax_dtype) * scale x = torch.where(mask, x, mask_value) x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) return x @torch.jit.script def upcast_softmax(x: torch.Tensor, scale: float, softmax_dtype: torch.dtype): input_dtype = x.dtype x = x.to(softmax_dtype) * scale x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) return x @torch.jit.script def masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor): x = torch.where(mask, x, mask_value) x = torch.nn.functional.softmax(x, dim=-1) return x def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class GPTBigCodeAttention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.config = config self.mask_value = None self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.kv_heads = 1 if self.multi_query else self.num_heads self.kv_dim = self.kv_heads * self.head_dim self.num_key_value_groups = self.num_heads // self.kv_heads self.split_size = self.embed_dim self.is_causal = True if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale_attn_weights = config.scale_attn_weights self.scaling = self.head_dim**-0.5 if config.scale_attn_weights else 1.0 self.is_cross_attention = is_cross_attention self.layer_idx = layer_idx self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 self.scale_attention_softmax_in_fp32 = ( config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32 ) self.attn_pdrop = config.attn_pdrop if self.is_cross_attention: if self.multi_query: raise NotImplementedError("Multi-Query Attention not supported for cross_attention") self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim) self.q_attn = nn.Linear(self.embed_dim, self.embed_dim) else: self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim) self.c_proj = nn.Linear(self.embed_dim, self.embed_dim) self.attn_dropout = config.attn_pdrop self.resid_dropout = nn.Dropout(config.resid_pdrop) def forward( self, hidden_states: torch.Tensor, layer_past: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[ tuple[torch.Tensor, Optional[torch.Tensor]], tuple[torch.Tensor, Optional[torch.Tensor], tuple[torch.Tensor, ...]], ]: input_shape = hidden_states.shape[:-1] if layer_past is not None: if isinstance(layer_past, EncoderDecoderCache): is_updated = layer_past.is_updated.get(self.layer_idx) if self.is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_values = layer_past.cross_attention_cache else: curr_past_key_values = layer_past.self_attention_cache else: curr_past_key_values = layer_past if self.is_cross_attention: if not hasattr(self, "q_attn") or not self.is_cross_attention: raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." 
) if layer_past is not None and is_updated: # reuse k,v, cross_attentions key = curr_past_key_values.layers[self.layer_idx].keys value = curr_past_key_values.layers[self.layer_idx].values else: query = self.q_attn(hidden_states).view(*input_shape, -1, self.head_dim).transpose(1, 2) key, value = self.c_attn(encoder_hidden_states).split((self.head_dim, self.head_dim), dim=-1) else: if self.multi_query: query, key, value = ( self.c_attn(hidden_states).unsqueeze(1).split((self.embed_dim, self.kv_dim, self.kv_dim), dim=3) ) query = query.view(*input_shape, -1, self.head_dim).transpose(1, 2) else: query, key, value = ( self.c_attn(hidden_states) .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) .transpose(1, 2) .split(3 * [self.head_dim], dim=3) ) if layer_past is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not self.is_cross_attention else None key, value = curr_past_key_values.update(key, value, self.layer_idx, {"cache_position": cache_position}) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if self.is_cross_attention: layer_past.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query, key, value, attention_mask, dropout=0.0 if not self.training else self.attn_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) return attn_output, attn_weights class GPTBigCodeMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = nn.Linear(embed_dim, intermediate_size) self.c_proj = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP.forward def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPTBigCodeBlock(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPTBigCodeAttention(config, layer_idx=layer_idx) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: if config.multi_query: raise NotImplementedError("Cross-attention not implemented for MQA") self.crossattention = GPTBigCodeAttention(config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTBigCodeMLP(self.inner_dim, config) def forward( self, hidden_states: Optional[tuple[torch.Tensor]], encoder_hidden_states: Optional[torch.Tensor] = None, layer_past: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: 
Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[ tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor] ]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn( hidden_states, layer_past=layer_past, attention_mask=attention_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) attn_output = attn_outputs[0] # output_attn: a, present, (attentions) outputs = attn_outputs[1:] # residual connection hidden_states = attn_output + residual if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) attn_output = cross_attn_outputs[0] # residual connection hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[1:] # add cross attentions if we output attention weights residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states return (hidden_states,) + outputs @auto_docstring class GPTBigCodePreTrainedModel(PreTrainedModel): config: GPTBigCodeConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["GPTBigCodeBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True @torch.no_grad() def _init_weights(self, module): """Initialize the weights.""" super()._init_weights(module) if isinstance(module, (GPTBigCodeMLP, GPTBigCodeAttention)): # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. 
# > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py init.normal_( module.c_proj.weight, mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer) ) elif isinstance(module, GPTBigCodeModel): max_positions = module.config.max_position_embeddings init.copy_(module.bias, torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool))) @auto_docstring class GPTBigCodeModel(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTBigCodeBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) max_positions = config.max_position_embeddings self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)), persistent=False ) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, position_ids=position_ids, past_key_values=past_key_values, ) if self.config._attn_implementation == "flash_attention_2": encoder_attention_mask = ( encoder_attention_mask.bool() if (encoder_attention_mask is not None and 0 in encoder_attention_mask) else None ) else: # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if ( self.config.add_cross_attention and encoder_hidden_states is not None and encoder_attention_mask is not None ): if encoder_attention_mask.dim() == 2: encoder_attention_mask.unsqueeze(1) assert encoder_attention_mask.dim() == 3 encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1) else: encoder_attention_mask = None position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, # as a positional argument for gradient checkpointing encoder_hidden_states, # as a positional argument for gradient checkpointing layer_past=past_key_values, # as keyword argument so it can be removed by GradientCheckpointingLayer attention_mask=causal_mask, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] if 
output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[2],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) @auto_docstring( custom_intro=""" The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """ ) class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"} def __init__(self, config): super().__init__(config) self.transformer = GPTBigCodeModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> Union[tuple, CausalLMOutputWithCrossAttentions]: r""" input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = transformer_outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) @auto_docstring( custom_intro=""" The GPTBigCode Model transformer with a sequence classification head on top (linear layer). [`GPTBigCodeForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """ ) class GPTBigCodeForSequenceClassification(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTBigCodeModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, SequenceClassifierOutputWithPast]: r""" input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. 
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once(
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
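The `repeat_kv` helper in the modeling file above broadcasts the single key/value head used by Multi-Query Attention up to the full number of attention heads before the eager attention matmul. A small standalone sketch, independent of the module above and assuming only `torch` is installed, that checks the equivalence with `torch.repeat_interleave` stated in its docstring:

```python
import torch


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # Same logic as the helper in modeling_gpt_bigcode.py:
    # (batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim)
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


kv = torch.randn(2, 1, 5, 64)  # MQA: one key/value head
expanded = repeat_kv(kv, n_rep=12)
print(expanded.shape)  # torch.Size([2, 12, 5, 64])

# The docstring's claim: equivalent to torch.repeat_interleave along dim=1.
assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=12, dim=1))
```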
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gpt_bigcode/__init__.py
src/transformers/models/gpt_bigcode/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_gpt_bigcode import * from .modeling_gpt_bigcode import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
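The `__init__.py` above routes the package through `_LazyModule`, so `configuration_gpt_bigcode` and `modeling_gpt_bigcode` are only imported when one of their exported names is first accessed. A short usage sketch, assuming a recent `transformers` installation; the tiny hyperparameters are chosen purely to keep the example light:

```python
# Accessing these names triggers the lazy import of the underlying submodules.
from transformers.models.gpt_bigcode import GPTBigCodeConfig, GPTBigCodeForCausalLM

# A deliberately small, randomly initialized model; not a pretrained checkpoint.
config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
model = GPTBigCodeForCausalLM(config)
print(type(model).__name__)                         # GPTBigCodeForCausalLM
print(sum(p.numel() for p in model.parameters()))   # parameter count of the toy model
```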
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blip/configuration_blip.py
src/transformers/models/blip/configuration_blip.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Blip model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class BlipTextConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`BlipTextModel`]. It is used to instantiate a BLIP text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `BlipText` used by the [base architectures](https://huggingface.co/Salesforce/blip-vqa-base). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30524): Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BlipModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. encoder_hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers from the vision model. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. bos_token_id (`int`, *optional*, defaults to 30522): The id of the `beginning-of-sequence` token. eos_token_id (`int`, *optional*, defaults to 2): The id of the `end-of-sequence` token. pad_token_id (`int`, *optional*, defaults to 0): The id of the `padding` token. sep_token_id (`int`, *optional*, defaults to 102): The id of the `separator` token. 
is_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as a decoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). label_smoothing (float, *optional*): A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no smoothing. The targets become a mixture of the original ground truth and a uniform distribution as described in `Rethinking the Inception Architecture for Computer Vision <https://huggingface.co/papers/1512.00567>`__. Default: :math:`0.0`. Example: ```python >>> from transformers import BlipTextConfig, BlipTextModel >>> # Initializing a BlipTextConfig with Salesforce/blip-vqa-base style configuration >>> configuration = BlipTextConfig() >>> # Initializing a BlipTextModel (with random weights) from the Salesforce/blip-vqa-base style configuration >>> model = BlipTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "blip_text_model" base_config_key = "text_config" def __init__( self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, label_smoothing=0.0, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, ) self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_hidden_size = encoder_hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.hidden_dropout_prob = hidden_dropout_prob self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.attention_probs_dropout_prob = attention_probs_dropout_prob self.is_decoder = is_decoder self.use_cache = use_cache self.label_smoothing = label_smoothing class BlipVisionConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`BlipVisionModel`]. It is used to instantiate a BLIP vision model according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of the Blip-base [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. 
image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 1e-10): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers import BlipVisionConfig, BlipVisionModel >>> # Initializing a BlipVisionConfig with Salesforce/blip-vqa-base style configuration >>> configuration = BlipVisionConfig() >>> # Initializing a BlipVisionModel (with random weights) from the Salesforce/blip-vqa-base style configuration >>> model = BlipVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "blip_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act class BlipConfig(PreTrainedConfig): r""" [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to instantiate a BLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-base [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`BlipTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`BlipVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original BLIP implementation. image_text_hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden state of the image-text fusion layer. label_smoothing (float, optional, *optional*, defaults to 0.0): A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no smoothing. 
The targets become a mixture of the original ground truth and a uniform distribution as described in `Rethinking the Inception Architecture for Computer Vision <https://huggingface.co/papers/1512.00567>`__. Default: :math:`0.0`. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import BlipConfig, BlipModel >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration >>> configuration = BlipConfig() >>> # Initializing a BlipModel (with random weights) from the Salesforce/blip-vqa-base style configuration >>> model = BlipModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig >>> # Initializing a BLIPText and BLIPVision configuration >>> config_text = BlipTextConfig() >>> config_vision = BlipVisionConfig() >>> config = BlipConfig(text_config=config_text, vision_config=config_vision) ```""" model_type = "blip" sub_configs = {"text_config": BlipTextConfig, "vision_config": BlipVisionConfig} def __init__( self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, label_smoothing=0.0, **kwargs, ): if text_config is None: text_config = BlipTextConfig() logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.") elif isinstance(text_config, dict): text_config = BlipTextConfig(**text_config) if vision_config is None: vision_config = BlipVisionConfig() logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.") elif isinstance(vision_config, dict): vision_config = BlipVisionConfig(**vision_config) self.text_config = text_config self.vision_config = vision_config self.text_config.encoder_hidden_size = self.vision_config.hidden_size self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 self.initializer_range = 0.02 self.image_text_hidden_size = image_text_hidden_size self.label_smoothing = label_smoothing super().__init__(**kwargs) __all__ = ["BlipConfig", "BlipTextConfig", "BlipVisionConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
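As the `BlipConfig.__init__` above shows, missing sub-configs are replaced by defaults and `text_config.encoder_hidden_size` is overwritten with `vision_config.hidden_size`, so the text decoder's cross-attention always matches the width of the vision encoder. A minimal sketch, assuming a recent `transformers` installation; the non-default vision width is only for illustration:

```python
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_config = BlipTextConfig()                                            # default encoder_hidden_size=768
vision_config = BlipVisionConfig(hidden_size=512, num_attention_heads=8)  # illustrative non-default width

config = BlipConfig(text_config=text_config, vision_config=vision_config)

# The constructor ties the two sub-configs together:
print(config.text_config.encoder_hidden_size)  # 512, copied from vision_config.hidden_size
print(config.vision_config.hidden_size)        # 512
```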
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blip/modeling_blip_text.py
src/transformers/models/blip/modeling_blip_text.py
# coding=utf-8 # Copyright 2022 The Salesforce Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the BSD-3-clause license (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Optional, Union import torch from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import logging from .configuration_blip import BlipTextConfig logger = logging.get_logger(__name__) # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L52 class BlipTextEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.config = config def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L97 class BlipTextSelfAttention(nn.Module): def __init__(self, config, is_cross_attention, layer_idx=None): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) 
self.all_head_size = self.num_attention_heads * self.attention_head_size self.layer_idx = layer_idx self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor]: batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None attention_mask = encoder_attention_mask if is_cross_attention else attention_mask is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_layer from cache curr_past_key_values = past_key_values.cross_attention_cache else: curr_past_key_values = past_key_values.self_attention_cache else: curr_past_key_values = past_key_values current_states = encoder_hidden_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_layer = curr_past_key_values.layers[self.layer_idx].keys value_layer = curr_past_key_values.layers[self.layer_idx].values else: key_layer = ( self.key(current_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(current_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) if past_key_values is not None: # save all key/value_layer to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_layer, value_layer = curr_past_key_values.update( key_layer, value_layer, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BlipTextModel forward() function) attention_scores = attention_scores + attention_mask.to(attention_scores.device) # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs_dropped = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer, attention_probs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert -> BlipText class BlipTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#242 class BlipTextAttention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.self = BlipTextSelfAttention(config, is_cross_attention, layer_idx=layer_idx) self.output = BlipTextSelfOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert -> BlipText class BlipTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert -> BlipText class BlipTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = 
nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BlipTextLayer(GradientCheckpointingLayer): def __init__(self, config, layer_num): super().__init__() self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BlipTextAttention(config, layer_idx=layer_num) self.layer_num = layer_num if self.config.is_decoder: self.crossattention = BlipTextAttention( config, is_cross_attention=self.config.is_decoder, layer_idx=layer_num ) self.intermediate = BlipTextIntermediate(config) self.output = BlipTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor]: self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, past_key_values=past_key_values, cache_position=cache_position, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return (layer_output,) + outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L386 class BlipTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BlipTextLayer(config, i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if use_cache: # The model acts as encoder decoder but is not an encoder decoder. 
So we cast all cache objects to # `EncoderDecoderCache` type assuming that the incoming cache is from `self_attention` if isinstance(past_key_values, DynamicCache): past_key_values = EncoderDecoderCache(past_key_values, DynamicCache(config=self.config)) elif past_key_values is None: past_key_values = EncoderDecoderCache( DynamicCache(config=self.config), DynamicCache(config=self.config) ) all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions, cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->BlipText class BlipTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BlipText class BlipTextPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BlipText class BlipTextLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BlipTextPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BlipText class BlipTextOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BlipTextLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L548 class BlipTextPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: BlipTextConfig base_model_prefix = "bert" _no_split_modules = [] def _init_weights(self, module): super()._init_weights(module) if isinstance(module, BlipTextEmbeddings): init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1))) # Adapted from https://github.com/salesforce/BLIP/blob/3a29b7410476bf5f2ba0955827390eb6ea1f4f9d/models/med.py#L571 class BlipTextModel(BlipTextPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To be used as a decoder, the model needs to be initialized with the `is_decoder` argument set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BlipTextEmbeddings(config) self.encoder = BlipTextEncoder(config) self.pooler = BlipTextPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def get_extended_attention_mask( self, attention_mask: Tensor, input_shape: tuple[int], device: device, is_decoder: bool ) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`tuple[int]`): The shape of the input to the model. device (`torch.device`): The device of the input to the model. Returns: `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # in case past_key_values are used we need to add a prefix ones mask to the causal mask causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones( (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype ), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, is_decoder: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Cache`, *optional*): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() batch_size, seq_length = input_shape device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape device = inputs_embeds.device elif encoder_embeds is not None: input_shape = encoder_embeds.size()[:-1] batch_size, seq_length = input_shape device = encoder_embeds.device else: raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length() if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length)).to(device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, input_shape, device, is_decoder ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None if encoder_embeds is None: embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) else: embedding_output = encoder_embeds encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict:
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
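A minimal, self-contained sketch of the mask handling implemented by `BlipTextModel.get_extended_attention_mask` in the file above, for the encoder (non-decoder) case; the tensor values and shapes are illustrative only:

```python
# Illustrative sketch (not part of the file above): how a 2D padding mask is
# broadcast to [batch_size, 1, 1, seq_length] and turned into an additive mask,
# matching the encoder branch of BlipTextModel.get_extended_attention_mask.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])         # [batch_size, seq_length], 0 marks padding
extended_mask = attention_mask[:, None, None, :].float()
extended_mask = (1.0 - extended_mask) * -10000.0      # 0.0 where attended, -10000.0 where masked

scores = torch.zeros(1, 1, 4, 4)                      # dummy raw attention scores [bsz, heads, q_len, k_len]
probs = torch.softmax(scores + extended_mask, dim=-1)
print(probs[0, 0, 0])                                 # the padded position receives ~0 probability
```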
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blip/processing_blip.py
src/transformers/models/blip/processing_blip.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Blip. """ from typing import Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput class BlipProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "stride": 0, "return_overflowing_tokens": False, "return_special_tokens_mask": False, "return_offsets_mapping": False, "return_token_type_ids": False, "return_length": False, "verbose": True, }, } class BlipProcessor(ProcessorMixin): r""" Constructs a BLIP processor which wraps a BERT tokenizer and BLIP image processor into a single processor. [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`BertTokenizerFast`]. See the docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information. Args: image_processor (`BlipImageProcessor`): An instance of [`BlipImageProcessor`]. The image processor is a required input. tokenizer (`BertTokenizerFast`): An instance of ['BertTokenizerFast`]. The tokenizer is a required input. """ def __init__(self, image_processor, tokenizer, **kwargs): tokenizer.return_token_type_ids = False super().__init__(image_processor, tokenizer) def __call__( self, images: Optional[ImageInput] = None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]] = None, **kwargs: Unpack[BlipProcessorKwargs], ) -> BatchEncoding: """ This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and [`BertTokenizerFast.__call__`] to prepare text for the model. Please refer to the docstring of the above two methods for more information. Args: images (`ImageInput`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. """ if images is None and text is None: raise ValueError("You have to specify either images or text.") text_encoding = None # add pixel_values encoding. If we also have text_encoding, update image encoding and return it. # else, return the text encoding. 
output_kwargs = self._merge_kwargs( BlipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if text is not None: text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"]) if images is not None: encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"]) if text_encoding is not None: encoding_image_processor.update(text_encoding) return encoding_image_processor return text_encoding @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names tokenizer_input_names = [name for name in tokenizer_input_names if name != "token_type_ids"] return tokenizer_input_names + image_processor_input_names __all__ = ["BlipProcessor"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
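An illustrative usage sketch for the `BlipProcessor` defined above; the checkpoint name follows the examples used elsewhere in this repo:

```python
# Illustrative usage of BlipProcessor: the image goes through BlipImageProcessor,
# the text through the BERT tokenizer, and both encodings are merged into one batch.
import requests
from PIL import Image

from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, text="a photography of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```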
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blip/__init__.py
src/transformers/models/blip/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_blip import * from .image_processing_blip import * from .image_processing_blip_fast import * from .modeling_blip import * from .modeling_blip_text import * from .processing_blip import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
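A small sketch of what the `_LazyModule` wiring above means in practice: nothing under `transformers.models.blip` is imported until an attribute is first accessed:

```python
# Illustrative: the module object is replaced by a _LazyModule proxy; the heavy
# submodules (modeling_blip, processing_blip, ...) are imported on first attribute access.
import transformers.models.blip as blip

print(type(blip).__name__)          # typically '_LazyModule'
processor_cls = blip.BlipProcessor  # this access triggers the real import of processing_blip
print(processor_cls.__name__)       # 'BlipProcessor'
```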
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blip/modeling_blip.py
src/transformers/models/blip/modeling_blip.py
# coding=utf-8 # Copyright 2022 The Salesforce Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BLIP model.""" import warnings from dataclasses import dataclass from typing import Any, Optional, Union import torch from torch import nn from torch.nn.functional import normalize from ... import initialization as init from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int from ...utils.generic import check_model_inputs from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel logger = logging.get_logger(__name__) # Copied from transformers.models.clip.modeling_clip.contrastive_loss def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->blip def blip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass @auto_docstring( custom_intro=""" Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. This class also adds the loss term from the text decoder. """ ) class BlipForConditionalGenerationModelOutput(ModelOutput): r""" loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Language modeling loss from the text decoder. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*): Prediction scores of the language modeling head of the text decoder model. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*): The image embeddings obtained after applying the Vision Transformer model to the input image. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[tuple[torch.FloatTensor]] = None logits: Optional[tuple[torch.FloatTensor]] = None image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @property def decoder_logits(self): warnings.warn( "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers." " Please use the `logits` attribute to retrieve the final output instead.", FutureWarning, ) return self.logits @dataclass @auto_docstring( custom_intro=""" Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. This class also adds the loss term from the text decoder. """ ) class BlipTextVisionModelOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss from the text decoder. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. """ loss: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. This class also adds the loss term from the text decoder as well as the image-text similarity scores. """ ) class BlipImageTextMatchingModelOutput(ModelOutput): r""" itm_score (`torch.FloatTensor`): The image-text similarity scores. loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss from the text decoder. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. vision_pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*): Last layer hidden-state of the vision of the vision-only branch of the model. question_embeds (`torch.FloatTensor`): The question embeddings obtained by the text projection layer. """ itm_score: Optional[torch.FloatTensor] = None loss: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None vision_pooler_output: Optional[torch.FloatTensor] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None question_embeds: Optional[tuple[torch.FloatTensor]] = None @dataclass @auto_docstring class BlipOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. 
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`BlipTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`BlipVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: Optional[torch.FloatTensor] = None logits_per_text: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class BlipVisionEmbeddings(nn.Module): def __init__(self, config: BlipVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding class_pos_embed = self.position_embedding[:, :1] patch_pos_embed = self.position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: position_embedding = self.interpolate_pos_encoding(embeddings, height, width) else: position_embedding = self.position_embedding embeddings = embeddings + position_embedding[:, : embeddings.size(1), :].to(target_dtype) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Blip class BlipTextEmbeddings(nn.Module): def __init__(self, config: BlipTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] max_position_embedding = self.position_embedding.weight.shape[0] if seq_length > max_position_embedding: raise ValueError( f"Sequence length must be less than max_position_embeddings (got `sequence length`: " f"{seq_length} and max_position_embeddings: {max_position_embedding}" ) if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class BlipAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): 
super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = nn.Dropout(config.attention_dropout) self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim) self.projection = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() mixed_qkv = ( self.qkv(hidden_states) .reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads) .permute(2, 0, 3, 1, 4) ) query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2] # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) attention_scores = attention_scores * self.scale # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) context_layer = context_layer.reshape(new_context_layer_shape) output = self.projection(context_layer) return output, attention_probs # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Blip class BlipMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class BlipEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: BlipConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = BlipAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = BlipMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) @auto_docstring def forward( self, hidden_states: torch.Tensor, **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, _ = self.self_attn( hidden_states=hidden_states, **kwargs, ) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual return hidden_states @auto_docstring class BlipPreTrainedModel(PreTrainedModel): config: BlipConfig base_model_prefix = "blip" input_modalities = ("image", "text") supports_gradient_checkpointing = 
True _no_split_modules = ["BlipEncoderLayer", "BlipTextEmbeddings"] _skip_keys_device_placement = ["past_key_values"] @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" super()._init_weights(module) std = self.config.initializer_range if isinstance(module, BlipVisionEmbeddings): if hasattr(self.config, "vision_config"): std = self.config.vision_config.initializer_range init.trunc_normal_(module.position_embedding, mean=0.0, std=std) init.trunc_normal_(module.class_embedding, mean=0.0, std=std) elif isinstance(module, BlipTextEmbeddings): init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1))) class BlipEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`BlipEncoderLayer`]. Args: config (`BlipConfig`): The corresponding vision configuration for the `BlipEncoder`. """ def __init__(self, config: BlipConfig): super().__init__() self.config = config self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @auto_docstring def forward( self, inputs_embeds, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: hidden_states = inputs_embeds for encoder_layer in self.layers: hidden_states = encoder_layer( hidden_states, **kwargs, ) return BaseModelOutput(last_hidden_state=hidden_states) class BlipVisionModel(BlipPreTrainedModel): main_input_name = "pixel_values" input_modalities = ("image",) config: BlipVisionConfig _can_record_outputs = { "hidden_states": BlipEncoderLayer, "attentions": BlipAttention, } def __init__(self, config: BlipVisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = BlipVisionEmbeddings(config) self.encoder = BlipEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @check_model_inputs(tie_last_hidden_states=False) @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPooling]: if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, **kwargs, ) last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, ) def get_input_embeddings(self): return self.embeddings @auto_docstring( custom_intro=""" This model is going to be deprecated in future versions. Please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase. """ ) class BlipModel(BlipPreTrainedModel): config: BlipConfig def __init__(self, config: BlipConfig): super().__init__(config) if not isinstance(config.text_config, BlipTextConfig): raise TypeError( "config.text_config is expected to be of type BlipTextConfig but is of type" f" {type(config.text_config)}." 
) if not isinstance(config.vision_config, BlipVisionConfig): raise TypeError( "config.vision_config is expected to be of type BlipVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = BlipTextModel(text_config) self.vision_model = BlipVisionModel(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) logger.warning( "`BlipModel` is going to be deprecated in future release, please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase." ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.text_model.set_input_embeddings(value) @auto_docstring def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`]. Examples: ```python >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @auto_docstring def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @auto_docstring def get_multimodal_features( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" Returns: multimodal_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The multimodal embeddings obtained by applying the image embeddings to the text encoder using the cross-attention mechanism. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["a photo of a cat", "a photo of a dog"] >>> inputs = processor(images=image, text=texts, padding=True, return_tensors="pt") >>> multimodal_features = model.get_multimodal_features(**inputs) ```""" vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, ) pooled_output = text_outputs[1] # pooled_output multimodal_features = self.text_projection(pooled_output) return multimodal_features @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BlipOutput]: r""" return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... 
) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, **kwargs, ) image_embeds = vision_outputs.pooler_output image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs.pooler_output text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp().to(device=text_embeds.device) image_embeds = image_embeds.to(device=text_embeds.device, dtype=text_embeds.dtype) logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = blip_loss(logits_per_text) return BlipOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) @auto_docstring( custom_intro=""" BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt; the decoder then generates the rest of the caption from that text input. If no text input is provided, the decoder starts generating from the [BOS] (beginning-of-sequence) token only. """ ) class BlipForConditionalGeneration(BlipPreTrainedModel, GenerationMixin): config: BlipConfig main_input_name = "pixel_values" _tied_weights_keys = { "text_decoder.cls.predictions.decoder.bias": "text_decoder.cls.predictions.bias", "text_decoder.cls.predictions.decoder.weight": "text_decoder.bert.embeddings.word_embeddings.weight", } # TODO @arthurzucker check why we need this when for other models, their subPreTrainedModel handle it themselves. def __init__(self, config: BlipConfig): super().__init__(config) self.vision_model = BlipVisionModel(config.vision_config) self.text_decoder = BlipTextLMHeadModel(config.text_config) self.decoder_input_ids = config.text_config.bos_token_id self.decoder_pad_token_id = config.text_config.pad_token_id
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
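A worked, standalone sketch of the contrastive objective (`contrastive_loss` / `blip_loss`) used by `BlipModel.forward` in the file above; the batch size and similarity values are made up for illustration:

```python
# Illustrative: cross-entropy over a [text_batch, image_batch] similarity matrix whose
# diagonal holds the matching image-text pairs, averaged over both directions
# (mirrors contrastive_loss / blip_loss defined in modeling_blip.py above).
import torch
from torch import nn


def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def blip_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)      # text -> image direction
    image_loss = contrastive_loss(similarity.t())    # image -> text direction
    return (caption_loss + image_loss) / 2.0


logits_per_text = torch.eye(4) * 10.0  # a "perfect" similarity matrix: high only on matching pairs
print(blip_loss(logits_per_text))      # close to 0 when every pair is matched correctly
```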
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blip/image_processing_blip.py
src/transformers/models/blip/image_processing_blip.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for BLIP.""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) class BlipImageProcessor(BaseImageProcessor): r""" Constructs a BLIP image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. 
do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 384, "width": 384} size = get_size_dict(size, default_to_square=True) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: Optional[bool] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. The shortest edge of the image is resized to `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest edge equal to `int(size["shortest_edge"] * (1333 / 800))`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = self.fetch_images(images) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs __all__ = ["BlipImageProcessor"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
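For readers skimming the `src/transformers/models/blip/image_processing_blip.py` content above, the default pipeline in `preprocess` is: optional RGB conversion, bicubic resize to 384x384, rescaling by 1/255, normalization with the OpenAI CLIP mean/std, then packing into a `BatchFeature` as `pixel_values`. The snippet below is a minimal, self-contained sketch of that order using only Pillow and NumPy; it is my own illustration rather than the processor itself, so small numerical differences from the real class (which resizes NumPy arrays via `transformers`' own `resize`) are expected.

```python
# Minimal sketch of the default BLIP preprocessing order, using Pillow + NumPy only.
# OPENAI_CLIP_MEAN / OPENAI_CLIP_STD values are the CLIP statistics used by the processor.
import numpy as np
from PIL import Image

OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]


def blip_like_preprocess(image: Image.Image, size=(384, 384)) -> np.ndarray:
    """Approximate BlipImageProcessor's default steps on a single PIL image."""
    image = image.convert("RGB")  # do_convert_rgb
    # resize to (height, width); PIL's resize expects (width, height)
    image = image.resize(size[::-1], resample=Image.Resampling.BICUBIC)
    array = np.asarray(image).astype(np.float32) * (1 / 255)  # do_rescale
    array = (array - np.array(OPENAI_CLIP_MEAN)) / np.array(OPENAI_CLIP_STD)  # do_normalize
    return array.transpose(2, 0, 1)  # channels-first layout, like pixel_values


dummy = Image.new("RGB", (640, 480), color=(128, 64, 32))
print(blip_like_preprocess(dummy).shape)  # (3, 384, 384)
```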
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py
src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def load_demo_image(image_size, device): img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") transform = transforms.Compose( [ transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ] ) image = transform(raw_image).unsqueeze(0).to(device) return image def rename_key(key): if "visual_encoder" in key: key = re.sub("visual_encoder*", "vision_model.encoder", key) if "blocks" in key: key = re.sub(r"blocks", "layers", key) if "attn" in key: key = re.sub(r"attn", "self_attn", key) if "norm1" in key: key = re.sub(r"norm1", "layer_norm1", key) if "norm2" in key: key = re.sub(r"norm2", "layer_norm2", key) if "encoder.norm" in key: key = re.sub(r"encoder.norm", "post_layernorm", key) if "encoder.patch_embed.proj" in key: key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key) if "encoder.pos_embed" in key: key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key) if "encoder.cls_token" in key: key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key) if "self_attn" in key: key = re.sub(r"self_attn.proj", "self_attn.projection", key) return key @torch.no_grad() def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None): """ Copy/paste/tweak model's weights to transformers design. 
""" if config_path is not None: config = BlipConfig.from_pretrained(config_path) else: config = BlipConfig(projection_dim=512, text_config={}, vision_config={}) hf_model = BlipForConditionalGeneration(config).eval() model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base") pt_model = pt_model.eval() modified_state_dict = pt_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_model.load_state_dict(modified_state_dict) image_size = 384 image = load_demo_image(image_size=image_size, device="cpu") tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") input_ids = tokenizer(["a picture of"]).input_ids out = hf_model.generate(image, input_ids) assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] out = hf_model.generate(image) assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(pytorch_dump_folder_path) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' model_url = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base") vqa_model.eval() modified_state_dict = vqa_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_vqa_model = BlipForQuestionAnswering(config) hf_vqa_model.load_state_dict(modified_state_dict) question = ["How many dogs are in this image?"] question_input_ids = tokenizer(question, return_tensors="pt").input_ids answer = hf_vqa_model.generate(question_input_ids, image) print(tokenizer.decode(answer[0])) assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa") model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base") itm_model.eval() modified_state_dict = itm_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_itm_model = BlipForImageTextRetrieval(config) question = ["A picture of a woman with a dog sitting in a beach"] question_input_ids = tokenizer( question, return_tensors="pt", padding="max_length", truncation=True, max_length=35, ).input_ids hf_itm_model.load_state_dict(modified_state_dict) hf_itm_model.eval() out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True) out = hf_itm_model(question_input_ids, image, use_itm_head=False) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--config_path", default=None, type=str, 
help="Path to hf config.json of model to convert") args = parser.parse_args() convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
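The conversion script in `src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py` hinges on `rename_key`, which rewrites the original BLIP checkpoint keys into the Hugging Face layout before `load_state_dict`. The sketch below applies a trimmed-down subset of those substitutions to two representative keys to make the mapping visible; the checkpoint key names are made up for the demo.

```python
# Trimmed-down illustration of the script's regex-based key renaming (subset of rename_key).
import re


def rename_key(key: str) -> str:
    if "visual_encoder" in key:
        key = re.sub(r"visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    return key


for original in [
    "visual_encoder.blocks.0.attn.qkv.weight",
    "visual_encoder.blocks.0.norm1.weight",
]:
    print(f"{original} -> {rename_key(original)}")
# visual_encoder.blocks.0.attn.qkv.weight -> vision_model.encoder.layers.0.self_attn.qkv.weight
# visual_encoder.blocks.0.norm1.weight -> vision_model.encoder.layers.0.layer_norm1.weight
```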
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blip/image_processing_blip_fast.py
src/transformers/models/blip/image_processing_blip_fast.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for BLIP.""" from ...image_processing_utils_fast import BaseImageProcessorFast from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling from ...utils import auto_docstring @auto_docstring class BlipImageProcessorFast(BaseImageProcessorFast): # To be checked against the slow image processor # None values left after checking can be removed resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"height": 384, "width": 384} do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True __all__ = ["BlipImageProcessorFast"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
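`BlipImageProcessorFast` in `image_processing_blip_fast.py` only declares defaults as class attributes and inherits all behavior from `BaseImageProcessorFast`. A hedged usage sketch follows; it assumes a recent `transformers` build that exports the class and that `torch` and `torchvision` are installed (fast image processors are torchvision-backed), and the commented values are what the defaults above should produce rather than verified output.

```python
# Hedged usage sketch for the fast BLIP image processor (assumes torch + torchvision).
from PIL import Image
from transformers import BlipImageProcessorFast

processor = BlipImageProcessorFast()

# The class attributes declared in the file above become the instance defaults.
print(processor.size)      # expected: {'height': 384, 'width': 384}
print(processor.resample)  # expected: the bicubic resampling enum

dummy = Image.new("RGB", (640, 480), color=(120, 60, 30))
batch = processor(images=dummy, return_tensors="pt")
# With the defaults above this should be a (1, 3, 384, 384) float tensor.
print(batch["pixel_values"].shape)
```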
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/yolos/image_processing_yolos.py
src/transformers/models/yolos/image_processing_yolos.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for YOLOS.""" import pathlib from collections.abc import Iterable from typing import Any, Optional, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_annotations, validate_kwargs, validate_preprocess_arguments, ) from ...processing_utils import ImagesKwargs from ...utils import ( TensorType, is_scipy_available, is_torch_available, is_torch_tensor, is_vision_available, logging, ) from ...utils.import_utils import requires if is_torch_available(): import torch from torch import nn if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats logger = logging.get_logger(__name__) SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) class YolosImageProcessorKwargs(ImagesKwargs, total=False): r""" format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_convert_annotations (`bool`, *optional*, defaults to `True`): Controls whether to convert the annotations to the format expected by the YOLOS model. Converts the bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method. return_segmentation_masks (`bool`, *optional*, defaults to `False`): Whether to return segmentation masks. annotations (`AnnotationType` or `list[AnnotationType]`, *optional*): Annotations to transform according to the padding that is applied to the images. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. """ format: Union[str, AnnotationFormat] do_convert_annotations: bool return_segmentation_masks: bool annotations: Optional[Union[AnnotationType, list[AnnotationType]]] masks_path: Optional[Union[str, pathlib.Path]] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width( images: list[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> list[int]: """ Get the maximum height and width across all images in a batch. 
""" if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_data_format}") return (max_height, max_width) def get_size_with_aspect_ratio( image_size: tuple[int, int], size: int, max_size: Optional[int] = None, mod_size: int = 16 ) -> tuple[int, int]: """ Computes the output image size given the input image size and the desired output size with multiple of divisible_size. Args: image_size (`tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. mod_size (`int`, *optional*): The size to make multiple of mod_size. """ height, width = image_size raw_size = None if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: raw_size = max_size * min_original_size / max_original_size size = int(round(raw_size)) if width < height: ow = size if max_size is not None and raw_size is not None: oh = int(raw_size * height / width) else: oh = int(size * height / width) elif (height <= width and height == size) or (width <= height and width == size): oh, ow = height, width else: oh = size if max_size is not None and raw_size is not None: ow = int(raw_size * width / height) else: ow = int(size * width / height) if mod_size is not None: ow_mod = np.mod(ow, mod_size) oh_mod = np.mod(oh, mod_size) ow = ow - ow_mod oh = oh - oh_mod return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_image_size_for_max_height_width def get_image_size_for_max_height_width( input_image: np.ndarray, max_height: int, max_width: int, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> tuple[int, int]: """ Computes the output image size given the input image and the maximum allowed height and width. Keep aspect ratio. Important, even if image_height < max_height and image_width < max_width, the image will be resized to at least one of the edges be equal to max_height or max_width. For example: - input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50) - input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400) Args: input_image (`np.ndarray`): The image to resize. max_height (`int`): The maximum allowed height. max_width (`int`): The maximum allowed width. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. 
""" image_size = get_image_size(input_image, input_data_format) height, width = image_size height_scale = max_height / height width_scale = max_width / width min_scale = min(height_scale, width_scale) new_height = int(height * min_scale) new_width = int(width * min_scale) return new_height, new_width # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, tuple[int, int], list[int]], max_size: Optional[int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: input_image (`np.ndarray`): The image to resize. size (`int` or `tuple[int, int]` or `list[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ image_size = get_image_size(input_image, input_data_format) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: dict, image_size: tuple[int, int]) -> dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> list[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`list[list[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. 
height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation def prepare_coco_detection_annotation( image, target, return_segmentation_masks: bool = False, input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """ Convert the target in COCO format into the format expected by DETR. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj.get("iscrowd", 0) for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] # Converting the filtered keypoints list to a numpy array keypoints = np.asarray(keypoints, dtype=np.float32) # Apply the keep mask here to filter the relevant annotations keypoints = keypoints[keep] num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. 
Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->YOLOS def prepare_coco_panoptic_annotation( image: np.ndarray, target: dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True, input_data_format: Union[ChannelDimension, str] = None, ) -> dict: """ Prepare a coco panoptic annotation for YOLOS. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image def get_segmentation_image( masks: np.ndarray, input_size: tuple, target_size: tuple, stuff_equiv_classes, deduplicate=False ): h, w = input_size final_h, final_w = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img # Copied from transformers.models.detr.image_processing_detr.get_mask_area def get_mask_area(seg_img: np.ndarray, target_size: tuple[int, int], n_classes: int) -> np.ndarray: final_h, final_w = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 3) m_id = 
rgb_to_id(np_seg_img) area = [(m_id == i).sum() for i in range(n_classes)] return area # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities def score_labels_from_class_probabilities(logits: np.ndarray) -> tuple[np.ndarray, np.ndarray]: probs = scipy.special.softmax(logits, axis=-1) labels = probs.argmax(-1, keepdims=True) scores = np.take_along_axis(probs, labels, axis=-1) scores, labels = scores.squeeze(-1), labels.squeeze(-1) return scores, labels # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: dict[str, Any], orig_size: tuple[int, int], target_size: tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`dict[str, Any]`): The annotation dictionary. orig_size (`tuple[int, int]`): The original size of the input image. target_size (`tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. """ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `list[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. 
""" segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[set[int]] = None, target_size: Optional[tuple[int, int]] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: list[dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": 
pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments @requires(backends=("vision",)) class YolosImageProcessor(BaseImageProcessor): r""" Constructs a YOLOS image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch.
""" model_input_names = ["pixel_values", "pixel_mask"] valid_kwargs = YolosImageProcessorKwargs def __init__( self, format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_annotations: Optional[bool] = None, do_pad: bool = True, pad_size: Optional[dict[str, int]] = None, **kwargs, ) -> None: max_size = None if size is None else kwargs.pop("max_size", 1333) size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) # Backwards compatibility if do_convert_annotations is None: do_convert_annotations = do_normalize super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_convert_annotations = do_convert_annotations self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = kwargs.pop("pad_and_return_pixel_mask", do_pad) self.pad_size = pad_size self._valid_processor_keys = [ "images", "annotations", "return_segmentation_masks", "masks_path", "do_resize", "size", "resample", "do_rescale", "rescale_factor", "do_normalize", "image_mean", "image_std", "do_convert_annotations",
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
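The (truncated) YOLOS image processor content above includes `get_size_with_aspect_ratio`, which decides the output resolution for `{"shortest_edge": 800, "longest_edge": 1333}`-style sizes: scale the shortest edge to `size`, cap the longest edge at `max_size`, then snap both edges down to a multiple of `mod_size` (16). The compact restatement below is my own, written to show the arithmetic; it mirrors the logic of the function in the file rather than importing it.

```python
# Compact restatement (not imported from the file) of get_size_with_aspect_ratio's arithmetic.
def yolos_output_size(image_size, size=800, max_size=1333, mod_size=16):
    height, width = image_size
    raw_size = None
    if max_size is not None:
        min_side, max_side = float(min(image_size)), float(max(image_size))
        if max_side / min_side * size > max_size:
            raw_size = max_size * min_side / max_side  # cap the longest edge at max_size
            size = int(round(raw_size))
    if width < height:
        ow = size
        oh = int((raw_size if raw_size is not None else size) * height / width)
    elif (height <= width and height == size) or (width <= height and width == size):
        oh, ow = height, width
    else:
        oh = size
        ow = int((raw_size if raw_size is not None else size) * width / height)
    # snap both edges down to a multiple of mod_size so they stay patch-aligned
    return oh - oh % mod_size, ow - ow % mod_size


print(yolos_output_size((480, 640)))   # (800, 1056): shortest edge scaled up to 800
print(yolos_output_size((480, 1280)))  # (496, 1328): longest edge capped near 1333
```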
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/yolos/modeling_yolos.py
src/transformers/models/yolos/modeling_yolos.py
# coding=utf-8 # Copyright 2022 School of EIC, Huazhong University of Science & Technology and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch YOLOS model.""" import collections.abc from collections.abc import Callable from dataclasses import dataclass from typing import Optional import torch from torch import nn from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_yolos import YolosConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Output type of [`YolosForObjectDetection`]. """ ) class YolosObjectDetectionOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~YolosImageProcessor.post_process`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[dict] = None logits: Optional[torch.FloatTensor] = None pred_boxes: Optional[torch.FloatTensor] = None auxiliary_outputs: Optional[list[dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None class YolosEmbeddings(nn.Module): """ Construct the CLS token, detection tokens, position and patch embeddings. 
""" def __init__(self, config: YolosConfig) -> None: super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.detection_tokens = nn.Parameter(torch.zeros(1, config.num_detection_tokens, config.hidden_size)) self.patch_embeddings = YolosPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter( torch.zeros(1, num_patches + config.num_detection_tokens + 1, config.hidden_size) ) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.interpolation = InterpolateInitialPositionEmbeddings(config) self.config = config def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values) batch_size, seq_len, _ = embeddings.size() # add the [CLS] and detection tokens to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) detection_tokens = self.detection_tokens.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings, detection_tokens), dim=1) # add positional encoding to each token # this might require interpolation of the existing position embeddings position_embeddings = self.interpolation(self.position_embeddings, (height, width)) embeddings = embeddings + position_embeddings embeddings = self.dropout(embeddings) return embeddings class InterpolateInitialPositionEmbeddings(nn.Module): def __init__(self, config) -> None: super().__init__() self.config = config def forward(self, pos_embed, img_size=(800, 1344)) -> torch.Tensor: cls_pos_embed = pos_embed[:, 0, :] cls_pos_embed = cls_pos_embed[:, None] det_pos_embed = pos_embed[:, -self.config.num_detection_tokens :, :] patch_pos_embed = pos_embed[:, 1 : -self.config.num_detection_tokens, :] patch_pos_embed = patch_pos_embed.transpose(1, 2) batch_size, hidden_size, seq_len = patch_pos_embed.shape patch_height, patch_width = ( self.config.image_size[0] // self.config.patch_size, self.config.image_size[1] // self.config.patch_size, ) patch_pos_embed = patch_pos_embed.view(batch_size, hidden_size, patch_height, patch_width) height, width = img_size new_patch_height, new_patch_width = height // self.config.patch_size, width // self.config.patch_size patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_patch_height, new_patch_width), mode="bicubic", align_corners=False ) patch_pos_embed = patch_pos_embed.flatten(2).transpose(1, 2) scale_pos_embed = torch.cat((cls_pos_embed, patch_pos_embed, det_pos_embed), dim=1) return scale_pos_embed class InterpolateMidPositionEmbeddings(nn.Module): def __init__(self, config) -> None: super().__init__() self.config = config def forward(self, pos_embed, img_size=(800, 1344)) -> torch.Tensor: cls_pos_embed = pos_embed[:, :, 0, :] cls_pos_embed = cls_pos_embed[:, None] det_pos_embed = pos_embed[:, :, -self.config.num_detection_tokens :, :] patch_pos_embed = pos_embed[:, :, 1 : -self.config.num_detection_tokens, :] patch_pos_embed = patch_pos_embed.transpose(2, 3) depth, batch_size, hidden_size, seq_len = patch_pos_embed.shape patch_height, patch_width = ( self.config.image_size[0] // self.config.patch_size, self.config.image_size[1] // self.config.patch_size, ) patch_pos_embed = patch_pos_embed.view(depth * batch_size, hidden_size, patch_height, patch_width) height, width = img_size new_patch_height, new_patch_width = height // self.config.patch_size, width // self.config.patch_size patch_pos_embed = nn.functional.interpolate( patch_pos_embed, 
size=(new_patch_height, new_patch_width), mode="bicubic", align_corners=False ) patch_pos_embed = ( patch_pos_embed.flatten(2) .transpose(1, 2) .contiguous() .view(depth, batch_size, new_patch_height * new_patch_width, hidden_size) ) scale_pos_embed = torch.cat((cls_pos_embed, patch_pos_embed, det_pos_embed), dim=2) return scale_pos_embed class YolosPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.bert.modeling_bert.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): if scaling is None: scaling = query.size(-1) ** -0.5 # Take the dot product between "query" and "key" to get the raw attention scores. attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attention_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Yolos class YolosSelfAttention(nn.Module): def __init__(self, config: YolosConfig): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." 
) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.dropout_prob = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 self.is_causal = False self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: batch_size = hidden_states.shape[0] new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2) query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, query_layer, key_layer, value_layer, None, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob, ) new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.reshape(new_context_layer_shape) return context_layer, attention_probs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Yolos class YolosSelfOutput(nn.Module): """ The residual connection is defined in YolosLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: YolosConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Yolos class YolosAttention(nn.Module): def __init__(self, config: YolosConfig): super().__init__() self.attention = YolosSelfAttention(config) self.output = YolosSelfOutput(config) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: self_attn_output, _ = self.attention(hidden_states) output = self.output(self_attn_output, hidden_states) return output # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->Yolos class YolosIntermediate(nn.Module): def __init__(self, config: YolosConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->Yolos class YolosOutput(nn.Module): def __init__(self, config: YolosConfig): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->Yolos,VIT->YOLOS class YolosLayer(GradientCheckpointingLayer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: YolosConfig): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = YolosAttention(config) self.intermediate = YolosIntermediate(config) self.output = YolosOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states_norm = self.layernorm_before(hidden_states) attention_output = self.attention(hidden_states_norm) # first residual connection hidden_states = attention_output + hidden_states # in Yolos, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) return layer_output class YolosEncoder(nn.Module): def __init__(self, config: YolosConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([YolosLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False seq_length = ( 1 + (config.image_size[0] * config.image_size[1] // config.patch_size**2) + config.num_detection_tokens ) self.mid_position_embeddings = ( nn.Parameter( torch.zeros( config.num_hidden_layers - 1, 1, 
seq_length, config.hidden_size, ) ) if config.use_mid_position_embeddings else None ) self.interpolation = InterpolateMidPositionEmbeddings(config) if config.use_mid_position_embeddings else None def forward( self, hidden_states: torch.Tensor, height: int, width: int, ) -> BaseModelOutput: if self.config.use_mid_position_embeddings: interpolated_mid_position_embeddings = self.interpolation(self.mid_position_embeddings, (height, width)) for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states) if self.config.use_mid_position_embeddings: if i < (self.config.num_hidden_layers - 1): hidden_states = hidden_states + interpolated_mid_position_embeddings[i] return BaseModelOutput(last_hidden_state=hidden_states) @auto_docstring class YolosPreTrainedModel(PreTrainedModel): config: YolosConfig base_model_prefix = "vit" main_input_name = "pixel_values" input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = [] _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": YolosLayer, "attentions": YolosSelfAttention, } @auto_docstring class YolosModel(YolosPreTrainedModel): def __init__(self, config: YolosConfig, add_pooling_layer: bool = True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = YolosEmbeddings(config) self.encoder = YolosEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = YolosPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> YolosPatchEmbeddings: return self.embeddings.patch_embeddings @check_model_inputs(tie_last_hidden_states=False) @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) height, width = pixel_values.shape[-2:] encoder_outputs: BaseModelOutput = self.encoder(embedding_output, height=height, width=width) sequence_output = encoder_outputs.last_hidden_state sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output) class YolosPooler(nn.Module): def __init__(self, config: YolosConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->Yolos class YolosMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. 
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x @auto_docstring( custom_intro=""" YOLOS Model (consisting of a ViT encoder) with object detection heads on top, for tasks such as COCO detection. """ ) class YolosForObjectDetection(YolosPreTrainedModel): def __init__(self, config: YolosConfig): super().__init__(config) # YOLOS (ViT) encoder model self.vit = YolosModel(config, add_pooling_layer=False) # Object detection heads # We add one for the "no object" class self.class_labels_classifier = YolosMLPPredictionHead( input_dim=config.hidden_size, hidden_dim=config.hidden_size, output_dim=config.num_labels + 1, num_layers=3 ) self.bbox_predictor = YolosMLPPredictionHead( input_dim=config.hidden_size, hidden_dim=config.hidden_size, output_dim=4, num_layers=3 ) # Initialize weights and apply final processing self.post_init() # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py def _set_aux_loss(self, outputs_class, outputs_coord): return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @can_return_tuple @auto_docstring def forward( self, pixel_values: torch.FloatTensor, labels: Optional[list[dict]] = None, **kwargs: Unpack[TransformersKwargs], ) -> YolosObjectDetectionOutput: r""" labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: `'class_labels'` and `'boxes'` (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-tiny") >>> model = AutoModelForObjectDetection.from_pretrained("hustvl/yolos-tiny") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... 
) Detected remote with confidence 0.991 at location [46.48, 72.78, 178.98, 119.3] Detected remote with confidence 0.908 at location [336.48, 79.27, 368.23, 192.36] Detected cat with confidence 0.934 at location [337.18, 18.06, 638.14, 373.09] Detected cat with confidence 0.979 at location [10.93, 53.74, 313.41, 470.67] Detected remote with confidence 0.974 at location [41.63, 72.23, 178.09, 119.99] ```""" # First, send images through the YOLOS base model to obtain hidden states outputs: BaseModelOutputWithPooling = self.vit(pixel_values, **kwargs) sequence_output = outputs.last_hidden_state # Take the final hidden states of the detection tokens sequence_output = sequence_output[:, -self.config.num_detection_tokens :, :] # Class logits + predicted bounding boxes logits = self.class_labels_classifier(sequence_output) pred_boxes = self.bbox_predictor(sequence_output).sigmoid() loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: outputs_class, outputs_coord = None, None if self.config.auxiliary_loss: intermediate = outputs.hidden_states outputs_class = self.class_labels_classifier(intermediate) outputs_coord = self.bbox_predictor(intermediate).sigmoid() loss, loss_dict, auxiliary_outputs = self.loss_function( logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord ) return YolosObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel"]
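# --- Editor's note: illustrative sketch, not part of the original file ---
# A minimal, hedged sketch of the output shapes produced by YolosForObjectDetection
# above. The configuration values below are deliberately tiny and made up so the
# snippet is cheap to run; they are not a shipped checkpoint. Only the last
# `num_detection_tokens` hidden states feed the prediction heads, so the logits carry
# `num_labels + 1` classes (the extra one is the "no object" class) and each detection
# token predicts 4 box coordinates. `YolosConfig` and `torch` are already imported at
# the top of this module.
if __name__ == "__main__":
    tiny_config = YolosConfig(
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=64,
        image_size=[64, 64],
        patch_size=16,
        num_detection_tokens=10,
        num_labels=3,
    )
    tiny_model = YolosForObjectDetection(tiny_config).eval()
    pixel_values = torch.randn(1, 3, 64, 64)  # (batch, channels, height, width)
    with torch.no_grad():
        out = tiny_model(pixel_values)
    # One row of class logits and one box per detection token
    assert out.logits.shape == (1, 10, 3 + 1)
    assert out.pred_boxes.shape == (1, 10, 4)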
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/yolos/configuration_yolos.py
src/transformers/models/yolos/configuration_yolos.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """YOLOS model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class YolosConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`YolosModel`]. It is used to instantiate a YOLOS model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the YOLOS [hustvl/yolos-base](https://huggingface.co/hustvl/yolos-base) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`list[int]`, *optional*, defaults to `[512, 864]`): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. num_detection_tokens (`int`, *optional*, defaults to 100): The number of detection tokens. use_mid_position_embeddings (`bool`, *optional*, defaults to `True`): Whether to use the mid-layer position encodings. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. 
bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. Example: ```python >>> from transformers import YolosConfig, YolosModel >>> # Initializing a YOLOS hustvl/yolos-base style configuration >>> configuration = YolosConfig() >>> # Initializing a model (with random weights) from the hustvl/yolos-base style configuration >>> model = YolosModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "yolos" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.num_detection_tokens = num_detection_tokens self.use_mid_position_embeddings = use_mid_position_embeddings self.auxiliary_loss = auxiliary_loss # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient __all__ = ["YolosConfig"]
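# --- Editor's note: illustrative sketch, not part of the original file ---
# A small, hedged sketch of how the default configuration values above translate into
# the encoder sequence length used by YolosEncoder: one [CLS] token, plus the image
# patches, plus the detection tokens. The numbers follow directly from the defaults
# (image_size=[512, 864], patch_size=16, num_detection_tokens=100).
if __name__ == "__main__":
    config = YolosConfig()
    num_patches = (config.image_size[0] * config.image_size[1]) // config.patch_size**2
    seq_length = 1 + num_patches + config.num_detection_tokens
    print(seq_length)  # 1 + 1728 + 100 = 1829 tokens at the 512x864 default resolution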
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/yolos/convert_yolos_to_pytorch.py
src/transformers/models/yolos/convert_yolos_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert YOLOS checkpoints from the original repository. URL: https://github.com/hustvl/YOLOS""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_yolos_config(yolos_name: str) -> YolosConfig: config = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: config.hidden_size = 192 config.intermediate_size = 768 config.num_hidden_layers = 12 config.num_attention_heads = 3 config.image_size = [800, 1333] config.use_mid_position_embeddings = False elif yolos_name == "yolos_s_dWr": config.hidden_size = 330 config.num_hidden_layers = 14 config.num_attention_heads = 6 config.intermediate_size = 1320 elif "yolos_s" in yolos_name: config.hidden_size = 384 config.intermediate_size = 1536 config.num_hidden_layers = 12 config.num_attention_heads = 6 elif "yolos_b" in yolos_name: config.image_size = [800, 1344] config.num_labels = 91 repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :] state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def rename_key(name: str) -> str: if "backbone" in name: name = name.replace("backbone", "vit") if "cls_token" in name: name = name.replace("cls_token", "embeddings.cls_token") if "det_token" in name: name = name.replace("det_token", "embeddings.detection_tokens") if "mid_pos_embed" in name: name = name.replace("mid_pos_embed", 
"encoder.mid_position_embeddings") if "pos_embed" in name: name = name.replace("pos_embed", "embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "blocks" in name: name = name.replace("blocks", "encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "class_embed" in name: name = name.replace("class_embed", "class_labels_classifier") if "bbox_embed" in name: name = name.replace("bbox_embed", "bbox_predictor") if "vit.norm" in name: name = name.replace("vit.norm", "vit.layernorm") return name def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict: for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[2]) dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:] else: orig_state_dict[rename_key(key)] = val return orig_state_dict # We will verify our results on an image of cute cats def prepare_img() -> torch.Tensor: url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_yolos_checkpoint( yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False ): """ Copy/paste/tweak model's weights to our YOLOS structure. 
""" config = get_yolos_config(yolos_name) # load original state_dict state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"] # load πŸ€— model model = YolosForObjectDetection(config) model.eval() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # Check outputs on an image, prepared by YolosImageProcessor size = 800 if yolos_name != "yolos_ti" else 512 image_processor = YolosImageProcessor(format="coco_detection", size=size) encoding = image_processor(images=prepare_img(), return_tensors="pt") outputs = model(**encoding) logits, pred_boxes = outputs.logits, outputs.pred_boxes expected_slice_logits, expected_slice_boxes = None, None if yolos_name == "yolos_ti": expected_slice_logits = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) expected_slice_boxes = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": expected_slice_logits = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) expected_slice_boxes = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": expected_slice_logits = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) expected_slice_boxes = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": expected_slice_logits = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) expected_slice_boxes = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": expected_slice_logits = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) expected_slice_boxes = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}") assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4) assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model_mapping = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub...") model_name = model_mapping[yolos_name] image_processor.push_to_hub(repo_id=f"hustvl/{model_name}") model.push_to_hub(repo_id=f"hustvl/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." 
) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the Hugging Face hub.", ) args = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
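# --- Editor's note: illustrative addition, not part of the original file ---
# A hypothetical invocation of this conversion script. The checkpoint and output paths
# below are placeholders only (no such files ship with the repository); the model name
# must be one of the keys handled in `get_yolos_config`:
#
#   python src/transformers/models/yolos/convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# Adding `--push_to_hub` additionally uploads the converted model and image processor
# under the corresponding `hustvl/*` repository name from `model_mapping`.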
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/yolos/image_processing_yolos_fast.py
src/transformers/models/yolos/image_processing_yolos_fast.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/yolos/modular_yolos.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_yolos.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 import pathlib from typing import Any, Optional, Union import torch from torchvision.io import read_image from torchvision.transforms.v2 import functional as F from ...image_processing_utils import BatchFeature, get_size_dict from ...image_processing_utils_fast import ( BaseImageProcessorFast, SizeDict, get_image_size_for_max_height_width, get_max_height_width, safe_squeeze, ) from ...image_transforms import center_to_corners_format, corners_to_center_format from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, PILImageResampling, get_image_size, validate_annotations, ) from ...processing_utils import Unpack from ...utils import TensorType, auto_docstring from ...utils.import_utils import requires from .image_processing_yolos import YolosImageProcessorKwargs SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) # inspired by https://github.com/facebookresearch/yolos/blob/master/datasets/coco.py#L33 def convert_coco_poly_to_mask(segmentations, height: int, width: int, device: torch.device) -> torch.Tensor: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`list[list[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = torch.as_tensor(mask, dtype=torch.uint8, device=device) mask = torch.any(mask, axis=2) masks.append(mask) if masks: masks = torch.stack(masks, axis=0) else: masks = torch.zeros((0, height, width), dtype=torch.uint8, device=device) return masks # inspired by https://github.com/facebookresearch/yolos/blob/master/datasets/coco.py#L50 def prepare_coco_detection_annotation( image, target, return_segmentation_masks: bool = False, input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """ Convert the target in COCO format into the format expected by YOLOS. """ image_height, image_width = image.size()[-2:] image_id = target["image_id"] image_id = torch.as_tensor([image_id], dtype=torch.int64, device=image.device) # Get all COCO annotations for the given image. 
annotations = target["annotations"] classes = [] area = [] boxes = [] keypoints = [] for obj in annotations: if "iscrowd" not in obj or obj["iscrowd"] == 0: classes.append(obj["category_id"]) area.append(obj["area"]) boxes.append(obj["bbox"]) if "keypoints" in obj: keypoints.append(obj["keypoints"]) classes = torch.as_tensor(classes, dtype=torch.int64, device=image.device) area = torch.as_tensor(area, dtype=torch.float32, device=image.device) iscrowd = torch.zeros_like(classes, dtype=torch.int64, device=image.device) # guard against no boxes via resizing boxes = torch.as_tensor(boxes, dtype=torch.float32, device=image.device).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = { "image_id": image_id, "class_labels": classes[keep], "boxes": boxes[keep], "area": area[keep], "iscrowd": iscrowd[keep], "orig_size": torch.as_tensor([int(image_height), int(image_width)], dtype=torch.int64, device=image.device), } if keypoints: keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=image.device) # Apply the keep mask here to filter the relevant annotations keypoints = keypoints[keep] num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width, device=image.device) new_target["masks"] = masks[keep] return new_target def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor: """ Compute the bounding boxes around the provided panoptic segmentation masks. Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.numel() == 0: return torch.zeros((0, 4), device=masks.device) h, w = masks.shape[-2:] y = torch.arange(0, h, dtype=torch.float32, device=masks.device) x = torch.arange(0, w, dtype=torch.float32, device=masks.device) # see https://github.com/pytorch/pytorch/issues/50276 y, x = torch.meshgrid(y, x, indexing="ij") x_mask = masks * torch.unsqueeze(x, 0) x_max = x_mask.view(x_mask.shape[0], -1).max(-1)[0] x_min = ( torch.where(masks, x.unsqueeze(0), torch.tensor(1e8, device=masks.device)).view(masks.shape[0], -1).min(-1)[0] ) y_mask = masks * torch.unsqueeze(y, 0) y_max = y_mask.view(y_mask.shape[0], -1).max(-1)[0] y_min = ( torch.where(masks, y.unsqueeze(0), torch.tensor(1e8, device=masks.device)).view(masks.shape[0], -1).min(-1)[0] ) return torch.stack([x_min, y_min, x_max, y_max], 1) # 2 functions below adapted from https://github.com/cocodataset/panopticapi/blob/master/panopticapi/utils.py # Copyright (c) 2018, Alexander Kirillov # All rights reserved. def rgb_to_id(color): """ Converts RGB color to unique ID. """ if isinstance(color, torch.Tensor) and len(color.shape) == 3: if color.dtype == torch.uint8: color = color.to(torch.int32) return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) def prepare_coco_panoptic_annotation( image: torch.Tensor, target: dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True, input_data_format: Union[ChannelDimension, str] = None, ) -> dict: """ Prepare a coco panoptic annotation for YOLOS. 
""" image_height, image_width = get_image_size(image, channel_dim=input_data_format) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = torch.as_tensor( [target["image_id"] if "image_id" in target else target["id"]], dtype=torch.int64, device=image.device ) new_target["size"] = torch.as_tensor([image_height, image_width], dtype=torch.int64, device=image.device) new_target["orig_size"] = torch.as_tensor([image_height, image_width], dtype=torch.int64, device=image.device) if "segments_info" in target: masks = read_image(annotation_path).permute(1, 2, 0).to(dtype=torch.int32, device=image.device) masks = rgb_to_id(masks) ids = torch.as_tensor([segment_info["id"] for segment_info in target["segments_info"]], device=image.device) masks = masks == ids[:, None, None] masks = masks.to(torch.bool) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = torch.as_tensor( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=torch.int64, device=image.device, ) new_target["iscrowd"] = torch.as_tensor( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=torch.int64, device=image.device, ) new_target["area"] = torch.as_tensor( [segment_info["area"] for segment_info in target["segments_info"]], dtype=torch.float32, device=image.device, ) return new_target def get_size_with_aspect_ratio( image_size: tuple[int, int], size: int, max_size: Optional[int] = None, mod_size: int = 16 ) -> tuple[int, int]: """ Computes the output image size given the input image size and the desired output size with multiple of divisible_size. Args: image_size (`tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. mod_size (`int`, *optional*): The size to make multiple of mod_size. 
""" height, width = image_size raw_size = None if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: raw_size = max_size * min_original_size / max_original_size size = int(round(raw_size)) if width < height: ow = size if max_size is not None and raw_size is not None: oh = int(raw_size * height / width) else: oh = int(size * height / width) elif (height <= width and height == size) or (width <= height and width == size): oh, ow = height, width else: oh = size if max_size is not None and raw_size is not None: ow = int(raw_size * width / height) else: ow = int(size * width / height) if mod_size is not None: ow_mod = torch.remainder(torch.tensor(ow), mod_size).item() oh_mod = torch.remainder(torch.tensor(oh), mod_size).item() ow = ow - ow_mod oh = oh - oh_mod return (oh, ow) @auto_docstring @requires(backends=("torchvision", "torch")) class YolosImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_DEFAULT_MEAN image_std = IMAGENET_DEFAULT_STD format = AnnotationFormat.COCO_DETECTION do_resize = True do_rescale = True do_normalize = True do_pad = True size = {"shortest_edge": 800, "longest_edge": 1333} default_to_square = False model_input_names = ["pixel_values", "pixel_mask"] valid_kwargs = YolosImageProcessorKwargs def __init__(self, **kwargs: Unpack[YolosImageProcessorKwargs]) -> None: kwargs.setdefault("do_pad", kwargs.pop("pad_and_return_pixel_mask", self.do_pad)) size = kwargs.pop("size", None) max_size = None if size is None else kwargs.pop("max_size", 1333) size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} self.size = get_size_dict(size, max_size=max_size, default_to_square=False) # Backwards compatibility do_convert_annotations = kwargs.get("do_convert_annotations") do_normalize = kwargs.get("do_normalize") if do_convert_annotations is None and getattr(self, "do_convert_annotations", None) is None: self.do_convert_annotations = do_normalize if do_normalize is not None else self.do_normalize super().__init__(**kwargs) def prepare_annotation( self, image: torch.Tensor, target: dict, format: Optional[AnnotationFormat] = None, return_segmentation_masks: Optional[bool] = None, masks_path: Optional[Union[str, pathlib.Path]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> dict: """ Prepare an annotation for feeding into YOLOS model. """ format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation( image, target, return_segmentation_masks, input_data_format=input_data_format ) elif format == AnnotationFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format, ) else: raise ValueError(f"Format {format} is not supported.") return target def resize( self, image: torch.Tensor, size: SizeDict, interpolation: Optional["F.InterpolationMode"] = None, **kwargs, ) -> torch.Tensor: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. 
Args: image (`torch.Tensor`): Image to resize. size (`SizeDict`): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): Resampling filter to use if resizing the image. """ interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR if size.shortest_edge and size.longest_edge: # Resize the image so that the shortest edge or the longest edge is of the given size # while maintaining the aspect ratio of the original image. new_size = get_size_with_aspect_ratio( image.size()[-2:], size["shortest_edge"], size["longest_edge"], ) elif size.max_height and size.max_width: new_size = get_image_size_for_max_height_width(image.size()[-2:], size["max_height"], size["max_width"]) elif size.height and size.width: new_size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = F.resize( image, size=new_size, interpolation=interpolation, **kwargs, ) return image def resize_annotation( self, annotation: dict[str, Any], orig_size: tuple[int, int], target_size: tuple[int, int], threshold: float = 0.5, interpolation: Optional["F.InterpolationMode"] = None, ): """ Resizes an annotation to a target size. Args: annotation (`dict[str, Any]`): The annotation dictionary. orig_size (`tuple[int, int]`): The original size of the input image. target_size (`tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`InterpolationMode`, defaults to `F.InterpolationMode.NEAREST_EXACT`): The resampling filter to use when resizing the masks. 
""" interpolation = interpolation if interpolation is not None else F.InterpolationMode.NEAREST_EXACT ratio_height, ratio_width = [target / orig for target, orig in zip(target_size, orig_size)] new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * torch.as_tensor( [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32, device=boxes.device ) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = [F.resize(mask, target_size, interpolation=interpolation) for mask in masks] masks = torch.stack(masks).to(torch.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= torch.as_tensor( [image_width, image_height, image_width, image_height], dtype=torch.float32, device=boxes.device ) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation def _update_annotation_for_padded_image( self, annotation: dict, input_image_size: tuple[int, int], output_image_size: tuple[int, int], padding, update_bboxes, ) -> dict: """ Update the annotation for a padded image. """ new_annotation = {} new_annotation["size"] = output_image_size ratio_height, ratio_width = (input / output for output, input in zip(output_image_size, input_image_size)) for key, value in annotation.items(): if key == "masks": masks = value masks = F.pad( masks, padding, fill=0, ) masks = safe_squeeze(masks, 1) new_annotation["masks"] = masks elif key == "boxes" and update_bboxes: boxes = value boxes *= torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height], device=boxes.device) new_annotation["boxes"] = boxes elif key == "size": new_annotation["size"] = output_image_size else: new_annotation[key] = value return new_annotation def pad( self, image: torch.Tensor, padded_size: tuple[int, int], annotation: Optional[dict[str, Any]] = None, update_bboxes: bool = True, fill: int = 0, ): original_size = image.size()[-2:] padding_bottom = padded_size[0] - original_size[0] padding_right = padded_size[1] - original_size[1] if padding_bottom < 0 or padding_right < 0: raise ValueError( f"Padding dimensions are negative. Please make sure that the padded size is larger than the " f"original size. Got padded size: {padded_size}, original size: {original_size}." ) if original_size != padded_size: padding = [0, 0, padding_right, padding_bottom] image = F.pad(image, padding, fill=fill) if annotation is not None: annotation = self._update_annotation_for_padded_image( annotation, original_size, padded_size, padding, update_bboxes ) # Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. 
pixel_mask = torch.zeros(padded_size, dtype=torch.int64, device=image.device) pixel_mask[: original_size[0], : original_size[1]] = 1 return image, pixel_mask, annotation def _preprocess( self, images: list["torch.Tensor"], annotations: Optional[Union[AnnotationType, list[AnnotationType]]], masks_path: Optional[Union[str, pathlib.Path]], return_segmentation_masks: bool, do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, do_convert_annotations: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, pad_size: Optional[SizeDict], format: Optional[Union[str, AnnotationFormat]], return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. """ if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) format = AnnotationFormat(format) if annotations is not None: validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) if ( masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." ) data = {} processed_images = [] processed_annotations = [] pixel_masks = [] # Initialize pixel_masks here for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)): # prepare (COCO annotations as a list of Dict -> YOLOS target as a single Dict per image) if annotations is not None: annotation = self.prepare_annotation( image, annotation, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=ChannelDimension.FIRST, ) if do_resize: resized_image = self.resize(image, size=size, interpolation=interpolation) if annotations is not None: annotation = self.resize_annotation( annotation, orig_size=image.size()[-2:], target_size=resized_image.size()[-2:], ) image = resized_image # Fused rescale and normalize image = self.rescale_and_normalize(image, do_rescale, rescale_factor, do_normalize, image_mean, image_std) if do_convert_annotations and annotations is not None: annotation = self.normalize_annotation(annotation, get_image_size(image, ChannelDimension.FIRST)) processed_images.append(image) processed_annotations.append(annotation) images = processed_images annotations = processed_annotations if annotations is not None else None if do_pad: # depends on all resized image shapes so we need another loop if pad_size is not None: padded_size = (pad_size.height, pad_size.width) else: padded_size = get_max_height_width(images) padded_images = [] padded_annotations = [] for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)): # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} if padded_size == image.size()[-2:]: padded_images.append(image) pixel_masks.append(torch.ones(padded_size, dtype=torch.int64, device=image.device)) padded_annotations.append(annotation) continue image, pixel_mask, annotation = self.pad( image, padded_size, annotation=annotation, 
update_bboxes=do_convert_annotations ) padded_images.append(image) padded_annotations.append(annotation) pixel_masks.append(pixel_mask) images = padded_images annotations = padded_annotations if annotations is not None else None data.update({"pixel_mask": torch.stack(pixel_masks, dim=0)}) data.update({"pixel_values": torch.stack(images, dim=0)}) encoded_inputs = BatchFeature(data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, list[tuple]] = None, top_k: int = 100 ): """ Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`YolosObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. top_k (`int`, *optional*, defaults to 100): Keep only top k bounding boxes before filtering by thresholding. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() prob = prob.view(out_logits.shape[0], -1) k_value = min(top_k, prob.size(1)) topk_values, topk_indexes = torch.topk(prob, k_value, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor") labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, list): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results __all__ = ["YolosImageProcessorFast"]
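# --- Editor's note: illustrative sketch, not part of the original file ---
# A minimal, hedged sketch of the usual round trip with this fast image processor,
# mirroring the post_process_object_detection docstring above. The checkpoint name,
# image path, and threshold are illustrative assumptions, not values required by the
# class.
#
# from PIL import Image
# import torch
# from transformers import YolosForObjectDetection, YolosImageProcessorFast
#
# processor = YolosImageProcessorFast.from_pretrained("hustvl/yolos-tiny")
# model = YolosForObjectDetection.from_pretrained("hustvl/yolos-tiny")
# image = Image.open("cats.jpg")
# inputs = processor(images=image, return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)
# # (height, width) of the original image, so boxes come back in absolute pixels
# target_sizes = torch.tensor([image.size[::-1]])
# results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
# # results["scores"], results["labels"], results["boxes"] hold the kept detections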
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/yolos/modular_yolos.py
src/transformers/models/yolos/modular_yolos.py
from typing import Optional, Union import torch from transformers.models.detr.image_processing_detr_fast import DetrImageProcessorFast from ...image_transforms import center_to_corners_format from ...utils import ( TensorType, logging, ) logger = logging.get_logger(__name__) def get_size_with_aspect_ratio( image_size: tuple[int, int], size: int, max_size: Optional[int] = None, mod_size: int = 16 ) -> tuple[int, int]: """ Computes the output image size given the input image size and the desired output size with multiple of divisible_size. Args: image_size (`tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. mod_size (`int`, *optional*): The size to make multiple of mod_size. """ height, width = image_size raw_size = None if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: raw_size = max_size * min_original_size / max_original_size size = int(round(raw_size)) if width < height: ow = size if max_size is not None and raw_size is not None: oh = int(raw_size * height / width) else: oh = int(size * height / width) elif (height <= width and height == size) or (width <= height and width == size): oh, ow = height, width else: oh = size if max_size is not None and raw_size is not None: ow = int(raw_size * width / height) else: ow = int(size * width / height) if mod_size is not None: ow_mod = torch.remainder(torch.tensor(ow), mod_size).item() oh_mod = torch.remainder(torch.tensor(oh), mod_size).item() ow = ow - ow_mod oh = oh - oh_mod return (oh, ow) class YolosImageProcessorFast(DetrImageProcessorFast): def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, list[tuple]] = None, top_k: int = 100 ): """ Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`YolosObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. top_k (`int`, *optional*, defaults to 100): Keep only top k bounding boxes before filtering by thresholding. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() prob = prob.view(out_logits.shape[0], -1) k_value = min(top_k, prob.size(1)) topk_values, topk_indexes = torch.topk(prob, k_value, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor") labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, list): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results def post_process_instance_segmentation(self): raise NotImplementedError("Segmentation post-processing is not implemented for Deformable DETR yet.") def post_process_semantic_segmentation(self): raise NotImplementedError("Semantic segmentation post-processing is not implemented for Deformable DETR yet.") def post_process_panoptic_segmentation(self): raise NotImplementedError("Panoptic segmentation post-processing is not implemented for Deformable DETR yet.") __all__ = ["YolosImageProcessorFast"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/yolos/__init__.py
src/transformers/models/yolos/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_yolos import * from .feature_extraction_yolos import * from .image_processing_yolos import * from .image_processing_yolos_fast import * from .modeling_yolos import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
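# --- Editor's note: illustrative addition, not part of the original file ---
# A brief, hedged illustration of what the `_LazyModule` indirection above enables:
# the heavy submodules listed in the TYPE_CHECKING block are only imported when one of
# their public names is first accessed through the package.
#
# from transformers.models import yolos
# config = yolos.YolosConfig()      # triggers the import of configuration_yolos
# model = yolos.YolosModel(config)  # triggers the import of modeling_yolos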
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere2/modeling_cohere2.py
src/transformers/models/cohere2/modeling_cohere2.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/cohere2/modular_cohere2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_cohere2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 Cohere Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable from typing import Optional, Union import torch import torch.nn as nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...integrations import use_kernelized_func from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...utils.generic import check_model_inputs, maybe_autocast from .configuration_cohere2 import Cohere2Config class Cohere2RotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: Cohere2Config, device=None): super().__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False) @staticmethod def compute_default_rope_parameters( config: Optional[Cohere2Config] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). 
""" base = config.rope_parameters["rope_theta"] dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with maybe_autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.repeat_interleave(freqs, 2, dim=-1) # diff from Llama: we interleave() instead of cat() cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class Cohere2LayerNorm(nn.Module): def __init__(self, hidden_size=None, eps=1e-5, bias=False): """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim""" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) mean = hidden_states.mean(-1, keepdim=True) variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True) hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon) hidden_states = self.weight.to(torch.float32) * hidden_states return hidden_states.to(input_dtype) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights def rotate_half(x): # Split and rotate. Note that this function is different from e.g. Llama. 
x1 = x[..., ::2] x2 = x[..., 1::2] rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2) return rot_x def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ dtype = q.dtype q = q.float() k = k.float() cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype) @use_kernelized_func(apply_rotary_pos_emb) class Cohere2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Cohere2Config, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None self.sliding_window = config.sliding_window if layer_type == "sliding_attention" else None self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings if self.sliding_window is not None: query_states, key_states = 
apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Cohere2MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj class Cohere2DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Cohere2Config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Cohere2Attention(config=config, layer_idx=layer_idx) self.mlp = Cohere2MLP(config) self.input_layernorm = Cohere2LayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps) self.attention_type = config.layer_types[layer_idx] def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. past_key_values (`Cache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. 
""" residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states_attention, _ = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states_mlp = self.mlp(hidden_states) hidden_states = residual + hidden_states_attention + hidden_states_mlp return hidden_states @auto_docstring class Cohere2PreTrainedModel(PreTrainedModel): config: Cohere2Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Cohere2DecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": Cohere2DecoderLayer, "attentions": Cohere2Attention, } @auto_docstring class Cohere2Model(Cohere2PreTrainedModel): def __init__(self, config: Cohere2Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Cohere2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = Cohere2LayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps) self.rotary_emb = Cohere2RotaryEmbedding(config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @check_model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) if not isinstance(causal_mask_mapping := attention_mask, dict): mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs), } hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_embeddings=position_embeddings, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_ids=position_ids, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( 
last_hidden_state=hidden_states, past_key_values=past_key_values, ) @auto_docstring class Cohere2ForCausalLM(Cohere2PreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} def __init__(self, config): super().__init__(config) self.model = Cohere2Model(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.logit_scale = config.logit_scale self.tie_word_embeddings = config.tie_word_embeddings # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, Cohere2ForCausalLM >>> model = Cohere2ForCausalLM.from_pretrained("Cohere2ForAI/c4ai-command-r-v01") >>> tokenizer = AutoTokenizer.from_pretrained("Cohere2ForAI/c4ai-command-r-v01") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) logits = logits * self.logit_scale # main diff from Llama loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["Cohere2ForCausalLM", "Cohere2Model", "Cohere2PreTrainedModel"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
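The `rotate_half` and `apply_rotary_pos_emb` helpers in `modeling_cohere2.py` above interleave even/odd channels and repeat each frequency with `torch.repeat_interleave`, rather than splitting the head dimension in half as Llama does. The following standalone sketch illustrates that difference; it does not import transformers, and the toy tensor sizes are illustrative assumptions.

```python
# Minimal sketch of the interleaved rotary embedding used above, contrasted with
# the Llama-style half-split variant. Toy sizes (batch=1, heads=1, seq=4, head_dim=8)
# are illustrative assumptions.
import torch


def rotate_half_interleaved(x):
    # Pairs channel 2i with channel 2i+1, as in the Cohere2 code above.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    return torch.stack([-x2, x1], dim=-1).flatten(-2)


def rotate_half_split(x):
    # Llama-style: pairs channel i with channel i + head_dim // 2.
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


head_dim, seq_len, base = 8, 4, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))
positions = torch.arange(seq_len).float()
freqs = torch.outer(positions, inv_freq)             # (seq, head_dim / 2)

emb_interleaved = torch.repeat_interleave(freqs, 2, dim=-1)  # matches the interleaved pairing
emb_split = torch.cat((freqs, freqs), dim=-1)                # matches the half-split pairing

q = torch.randn(1, 1, seq_len, head_dim)
q_interleaved = q * emb_interleaved.cos() + rotate_half_interleaved(q) * emb_interleaved.sin()
q_split = q * emb_split.cos() + rotate_half_split(q) * emb_split.sin()

# Both conventions are pure per-pair rotations, so vector norms are preserved,
# but the channel pairing (and therefore the rotated values) differs.
print(torch.allclose(q.norm(dim=-1), q_interleaved.norm(dim=-1), atol=1e-5))  # True
print(torch.allclose(q_interleaved, q_split))                                 # False in general
```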
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere2/configuration_cohere2.py
src/transformers/models/cohere2/configuration_cohere2.py
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/cohere2/modular_cohere2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_cohere2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 Cohere Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from ...configuration_utils import PreTrainedConfig, layer_type_validation from ...modeling_rope_utils import RopeParameters class Cohere2Config(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate an Cohere model according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Instantiating a configuration with the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CohereModel`] hidden_size (`int`, *optional*, defaults to 8192): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 22528): Dimension of the MLP representations. logit_scale (`float`, *optional*, defaults to 0.0625): The scaling factor for the output logits. num_hidden_layers (`int`, *optional*, defaults to 40): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 5): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 255001): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. sliding_window (`int`, *optional*, defaults to 4096): Size of the sliding window attention context. layer_types (`list`, *optional*): Attention pattern for each layer. ```python >>> from transformers import Cohere2Model, Cohere2Config >>> # Initializing a Cohere Nextmodel configuration >>> configuration = Cohere2Config() >>> # Initializing a model from the Cohere2 configuration >>> model = Cohere2Model(configuration) # doctest: +SKIP >>> # Accessing the model configuration >>> configuration = model.config # doctest: +SKIP ``` """ model_type = "cohere2" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: Optional[int] = 256000, hidden_size: Optional[int] = 8192, intermediate_size: Optional[int] = 22528, logit_scale: Optional[float] = 0.0625, num_hidden_layers: Optional[int] = 40, num_attention_heads: Optional[int] = 64, num_key_value_heads: Optional[int] = None, hidden_act: Optional[str] = "silu", max_position_embeddings: Optional[int] = 8192, initializer_range: Optional[float] = 0.02, layer_norm_eps: Optional[int] = 1e-5, use_cache: Optional[int] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 5, eos_token_id: Optional[int] = 255001, tie_word_embeddings: Optional[bool] = True, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, attention_bias: Optional[bool] = False, attention_dropout: Optional[float] = 0.0, sliding_window: Optional[int] = 4096, layer_types: Optional[list[str]] = None, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.logit_scale = logit_scale self.intermediate_size = intermediate_size self.num_hidden_layers = 
num_hidden_layers self.num_attention_heads = num_attention_heads # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.sliding_window = sliding_window self.layer_types = layer_types # Need to specify head_dim in the config so it can be used in the attention forward functions self.head_dim = hidden_size // num_attention_heads # BC -> the pattern used to be a simple int, and it's still present in configs on the Hub self._sliding_window_pattern = kwargs.get("sliding_window_pattern", 4) if self.layer_types is None: # BC -> the pattern used to be a simple int, and it's still present in configs on the Hub self._sliding_window_pattern = getattr(self, "sliding_window_pattern", 4) self.layer_types = [ "sliding_attention" if bool((i + 1) % self._sliding_window_pattern) else "full_attention" for i in range(self.num_hidden_layers) ] layer_type_validation(self.layer_types, self.num_hidden_layers) self.rope_parameters = rope_parameters super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["Cohere2Config"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
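When `layer_types` is not supplied, `Cohere2Config.__init__` above derives it from the legacy `sliding_window_pattern` integer: every N-th layer becomes a full-attention layer and the rest use sliding-window attention. A minimal sketch of that comprehension follows; the pattern value and layer count are illustrative assumptions.

```python
# Sketch of how Cohere2Config derives `layer_types` from the legacy
# `sliding_window_pattern` integer when the list is not passed explicitly.
# The expression mirrors the comprehension in the __init__ above.
num_hidden_layers = 8        # illustrative assumption
sliding_window_pattern = 4   # default used for backward compatibility

layer_types = [
    "sliding_attention" if bool((i + 1) % sliding_window_pattern) else "full_attention"
    for i in range(num_hidden_layers)
]

# Every 4th layer attends globally, the others use the sliding window:
# ['sliding_attention', 'sliding_attention', 'sliding_attention', 'full_attention',
#  'sliding_attention', 'sliding_attention', 'sliding_attention', 'full_attention']
print(layer_types)
```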
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere2/modular_cohere2.py
src/transformers/models/cohere2/modular_cohere2.py
# coding=utf-8 # Copyright 2024 Cohere Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable from typing import Optional import torch import torch.nn as nn from ...cache_utils import Cache, DynamicCache from ...configuration_utils import PreTrainedConfig, layer_type_validation from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_outputs import BaseModelOutputWithPast from ...modeling_rope_utils import ( RopeParameters, dynamic_rope_update, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ...utils.generic import maybe_autocast from ..cohere.modeling_cohere import ( CohereAttention, CohereDecoderLayer, CohereForCausalLM, CohereLayerNorm, CoherePreTrainedModel, CohereRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, ) from ..gemma2.modeling_gemma2 import Gemma2Model logger = logging.get_logger(__name__) class Cohere2Config(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate an Cohere model according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Instantiating a configuration with the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CohereModel`] hidden_size (`int`, *optional*, defaults to 8192): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 22528): Dimension of the MLP representations. logit_scale (`float`, *optional*, defaults to 0.0625): The scaling factor for the output logits. num_hidden_layers (`int`, *optional*, defaults to 40): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. 
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 5): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 255001): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. sliding_window (`int`, *optional*, defaults to 4096): Size of the sliding window attention context. layer_types (`list`, *optional*): Attention pattern for each layer. 
```python >>> from transformers import Cohere2Model, Cohere2Config >>> # Initializing a Cohere Nextmodel configuration >>> configuration = Cohere2Config() >>> # Initializing a model from the Cohere2 configuration >>> model = Cohere2Model(configuration) # doctest: +SKIP >>> # Accessing the model configuration >>> configuration = model.config # doctest: +SKIP ``` """ model_type = "cohere2" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: Optional[int] = 256000, hidden_size: Optional[int] = 8192, intermediate_size: Optional[int] = 22528, logit_scale: Optional[float] = 0.0625, num_hidden_layers: Optional[int] = 40, num_attention_heads: Optional[int] = 64, num_key_value_heads: Optional[int] = None, hidden_act: Optional[str] = "silu", max_position_embeddings: Optional[int] = 8192, initializer_range: Optional[float] = 0.02, layer_norm_eps: Optional[int] = 1e-5, use_cache: Optional[int] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 5, eos_token_id: Optional[int] = 255001, tie_word_embeddings: Optional[bool] = True, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, attention_bias: Optional[bool] = False, attention_dropout: Optional[float] = 0.0, sliding_window: Optional[int] = 4096, layer_types: Optional[list[str]] = None, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.logit_scale = logit_scale self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.sliding_window = sliding_window self.layer_types = layer_types # Need to specify head_dim in the config so it can be used in the attention forward functions self.head_dim = hidden_size // num_attention_heads # BC -> the pattern used to be a simple int, and it's still present in configs on the Hub self._sliding_window_pattern = kwargs.get("sliding_window_pattern", 4) if self.layer_types is None: # BC -> the pattern used to be a simple int, and it's still present in configs on the Hub self._sliding_window_pattern = getattr(self, "sliding_window_pattern", 4) self.layer_types = [ "sliding_attention" if bool((i + 1) % self._sliding_window_pattern) else "full_attention" for i in range(self.num_hidden_layers) ] layer_type_validation(self.layer_types, self.num_hidden_layers) self.rope_parameters = rope_parameters super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) class Cohere2RotaryEmbedding(CohereRotaryEmbedding): @torch.no_grad() @dynamic_rope_update # power user: used with 
advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with maybe_autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.repeat_interleave(freqs, 2, dim=-1) # diff from Llama: we interleave() instead of cat() cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class Cohere2LayerNorm(CohereLayerNorm): pass class Cohere2Attention(CohereAttention): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Cohere2Config, layer_idx: Optional[int] = None): nn.Module.__init__(self) self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None self.sliding_window = config.sliding_window if layer_type == "sliding_attention" else None self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings if self.sliding_window is not None: query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, 
attn_weights class Cohere2DecoderLayer(CohereDecoderLayer): def __init__(self, config: Cohere2Config, layer_idx: int): super().__init__(config, layer_idx) self.attention_type = config.layer_types[layer_idx] def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states_attention, _ = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states_mlp = self.mlp(hidden_states) hidden_states = residual + hidden_states_attention + hidden_states_mlp return hidden_states class Cohere2PreTrainedModel(CoherePreTrainedModel): config: Cohere2Config class Cohere2Model(Gemma2Model): def __init__(self, config: Cohere2Config): super().__init__(config) self.norm = Cohere2LayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) if not isinstance(causal_mask_mapping := attention_mask, dict): mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs), } hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_embeddings=position_embeddings, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_ids=position_ids, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) class Cohere2ForCausalLM(CohereForCausalLM): pass __all__ = ["Cohere2Config", "Cohere2ForCausalLM", "Cohere2Model", "Cohere2PreTrainedModel"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
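`Cohere2DecoderLayer` (both in the modular file above and in the generated `modeling_cohere2.py`) uses a parallel-residual layout: a single pre-LayerNorm feeds both the attention and the MLP, and the two branch outputs are added to the residual in one step. Below is a minimal sketch of that dataflow with stand-in submodules; the toy modules and sizes are assumptions, not the real attention/MLP.

```python
# Minimal sketch of the parallel-residual block used by Cohere2DecoderLayer above:
# one shared pre-LayerNorm, attention and MLP computed from the same normalized
# input, and both branches added to the residual in a single step.
import torch
import torch.nn as nn


class ToyParallelBlock(nn.Module):
    def __init__(self, hidden_size=16):
        super().__init__()
        self.input_layernorm = nn.LayerNorm(hidden_size)
        self.self_attn = nn.Linear(hidden_size, hidden_size)   # stand-in for attention
        self.mlp = nn.Sequential(                              # stand-in for the gated MLP
            nn.Linear(hidden_size, 4 * hidden_size), nn.SiLU(), nn.Linear(4 * hidden_size, hidden_size)
        )

    def forward(self, hidden_states):
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        attn_out = self.self_attn(hidden_states)   # both branches read the same normalized input
        mlp_out = self.mlp(hidden_states)
        return residual + attn_out + mlp_out       # single residual sum, no post-attention norm


x = torch.randn(2, 5, 16)              # (batch, seq, hidden)
print(ToyParallelBlock()(x).shape)     # torch.Size([2, 5, 16])
```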
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere2/__init__.py
src/transformers/models/cohere2/__init__.py
# Copyright 2024 Cohere and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_cohere2 import * from .modeling_cohere2 import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
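The package `__init__.py` above defers importing the configuration and modeling modules until they are actually needed by replacing the module with a `_LazyModule`. The sketch below illustrates the general lazy-loading idea with a module-level `__getattr__` (PEP 562); it is not the actual `_LazyModule` implementation, and the submodule and class names in it are hypothetical.

```python
# Illustrative sketch of lazy submodule loading in a package __init__.py using
# module-level __getattr__ (PEP 562). Generic stand-in for the idea behind
# _LazyModule, not its implementation; the names below are hypothetical.
import importlib
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Static type checkers see the real symbols without paying the import cost.
    from .configuration_example import ExampleConfig  # hypothetical module/class
    from .modeling_example import ExampleModel        # hypothetical module/class

_LAZY_ATTRS = {
    "ExampleConfig": ".configuration_example",
    "ExampleModel": ".modeling_example",
}


def __getattr__(name):
    # Triggered on first attribute access; the heavy module is imported on demand.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```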
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/visual_bert/configuration_visual_bert.py
src/transformers/models/visual_bert/configuration_visual_bert.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VisualBERT model configuration""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class VisualBertConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`VisualBertModel`]. It is used to instantiate a VisualBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VisualBERT [uclanlp/visualbert-vqa-coco-pre](https://huggingface.co/uclanlp/visualbert-vqa-coco-pre) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the VisualBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`VisualBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. visual_embedding_dim (`int`, *optional*, defaults to 512): Dimensionality of the visual embeddings to be passed to the model. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`VisualBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers.
bypass_transformer (`bool`, *optional*, defaults to `False`): Whether or not the model should bypass the transformer for the visual embeddings. If set to `True`, the model directly concatenates the visual embeddings from [`VisualBertEmbeddings`] with text output from transformers, and then passes it to a self-attention layer. special_visual_initialize (`bool`, *optional*, defaults to `True`): Whether or not the visual token type and position type embedding weights should be initialized the same as the textual token type and position type embeddings. When set to `True`, the weights of the textual token type and position type embeddings are copied to the respective visual embedding layers. Example: ```python >>> from transformers import VisualBertConfig, VisualBertModel >>> # Initializing a VisualBERT visualbert-vqa-coco-pre style configuration >>> configuration = VisualBertConfig.from_pretrained("uclanlp/visualbert-vqa-coco-pre") >>> # Initializing a model (with random weights) from the visualbert-vqa-coco-pre style configuration >>> model = VisualBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "visual_bert" def __init__( self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.visual_embedding_dim = visual_embedding_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.bypass_transformer = bypass_transformer self.special_visual_initialize = special_visual_initialize __all__ = ["VisualBertConfig"]
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
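The main addition of `VisualBertConfig` over a plain BERT configuration is `visual_embedding_dim`: in the modeling code, visual features of that size are projected to `hidden_size` and concatenated with the text embeddings along the sequence axis. A small sketch of that shape bookkeeping follows; the toy batch/sequence sizes and the bare `nn.Linear` are illustrative assumptions, not the real `VisualBertEmbeddings` module.

```python
# Sketch of the shape bookkeeping implied by VisualBertConfig: visual features of
# size `visual_embedding_dim` are projected to `hidden_size` and concatenated with
# the text embeddings along the sequence dimension.
import torch
import torch.nn as nn

hidden_size = 768            # VisualBertConfig.hidden_size default
visual_embedding_dim = 512   # VisualBertConfig.visual_embedding_dim default

visual_projection = nn.Linear(visual_embedding_dim, hidden_size)

text_embeds = torch.randn(2, 16, hidden_size)             # (batch, text_len, hidden)
visual_feats = torch.randn(2, 36, visual_embedding_dim)   # e.g. 36 region features per image

visual_embeds = visual_projection(visual_feats)           # (2, 36, 768)
joint = torch.cat([text_embeds, visual_embeds], dim=1)    # (2, 16 + 36, 768)
print(joint.shape)                                        # torch.Size([2, 52, 768])
```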
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/visual_bert/modeling_visual_bert.py
src/transformers/models/visual_bert/modeling_visual_bert.py
# coding=utf-8 # Copyright 2021 The UCLA NLP Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch VisualBERT model.""" import math from dataclasses import dataclass from typing import Optional, Union import torch from torch import nn from torch.nn import CrossEntropyLoss, KLDivLoss, LogSoftmax from ... import initialization as init from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MultipleChoiceModelOutput, SequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ModelOutput, auto_docstring, logging from .configuration_visual_bert import VisualBertConfig logger = logging.get_logger(__name__) class VisualBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings and visual embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) # For Visual Features # Token type and position embedding for image features self.visual_token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.visual_position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) if config.special_visual_initialize: self.visual_token_type_embeddings.weight.data = nn.Parameter( self.token_type_embeddings.weight.data.clone(), requires_grad=True ) self.visual_position_embeddings.weight.data = nn.Parameter( self.position_embeddings.weight.data.clone(), requires_grad=True ) self.visual_projection = nn.Linear(config.visual_embedding_dim, config.hidden_size) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, visual_embeds=None, visual_token_type_ids=None, image_text_alignment=None, ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings # Absolute Position Embeddings 
position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings if visual_embeds is not None: if visual_token_type_ids is None: visual_token_type_ids = torch.ones( visual_embeds.size()[:-1], dtype=torch.long, device=self.position_ids.device ) visual_embeds = self.visual_projection(visual_embeds) visual_token_type_embeddings = self.visual_token_type_embeddings(visual_token_type_ids) if image_text_alignment is not None: # image_text_alignment = Batch x image_length x alignment_number. # Each element denotes the position of the word corresponding to the image feature. -1 is the padding value. dtype = token_type_embeddings.dtype image_text_alignment_mask = (image_text_alignment != -1).long() # Get rid of the -1. image_text_alignment = image_text_alignment_mask * image_text_alignment # Batch x image_length x alignment length x dim visual_position_embeddings = self.position_embeddings(image_text_alignment) visual_position_embeddings *= image_text_alignment_mask.to(dtype=dtype).unsqueeze(-1) visual_position_embeddings = visual_position_embeddings.sum(2) # We want to average along the alignment_number dimension. image_text_alignment_mask = image_text_alignment_mask.to(dtype=dtype).sum(2) if (image_text_alignment_mask == 0).sum() != 0: image_text_alignment_mask[image_text_alignment_mask == 0] = 1 # Avoid divide by zero error logger.warning( "Found 0 values in `image_text_alignment_mask`. Setting them to 1 to avoid divide-by-zero" " error." ) visual_position_embeddings = visual_position_embeddings / image_text_alignment_mask.unsqueeze(-1) visual_position_ids = torch.zeros( *visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device ) # When fine-tuning the detector , the image_text_alignment is sometimes padded too long. 
if visual_position_embeddings.size(1) != visual_embeds.size(1): if visual_position_embeddings.size(1) < visual_embeds.size(1): raise ValueError( f"Visual position embeddings length: {visual_position_embeddings.size(1)} " f"should be the same as `visual_embeds` length: {visual_embeds.size(1)}" ) visual_position_embeddings = visual_position_embeddings[:, : visual_embeds.size(1), :] visual_position_embeddings = visual_position_embeddings + self.visual_position_embeddings( visual_position_ids ) else: visual_position_ids = torch.zeros( *visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device ) visual_position_embeddings = self.visual_position_embeddings(visual_position_ids) visual_embeddings = visual_embeds + visual_position_embeddings + visual_token_type_embeddings embeddings = torch.cat((embeddings, visual_embeddings), dim=1) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class VisualBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward( self, hidden_states, attention_mask=None, output_attentions=False, ): batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in VisualBertSelfAttentionModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->VisualBert class VisualBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class VisualBertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = VisualBertSelfAttention(config) self.output = VisualBertSelfOutput(config) def forward( self, hidden_states, attention_mask=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->VisualBert class VisualBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->VisualBert class VisualBertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class VisualBertLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = VisualBertAttention(config) self.intermediate = VisualBertIntermediate(config) self.output = VisualBertOutput(config) def forward( self, hidden_states, attention_mask=None, output_attentions=False, ): self_attention_outputs = self.attention( hidden_states, attention_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs 
return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class VisualBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([VisualBertLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->VisualBert class VisualBertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->VisualBert class VisualBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->VisualBert class VisualBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = VisualBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
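        # (Illustrative note: the actual tying is declared by the pretraining head further below via
        # `VisualBertForPreTraining._tied_weights_keys`, which maps `cls.predictions.decoder.weight`
        # to `visual_bert.embeddings.word_embeddings.weight`.)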
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->VisualBert class VisualBertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = VisualBertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score @auto_docstring class VisualBertPreTrainedModel(PreTrainedModel): config: VisualBertConfig base_model_prefix = "visual_bert" input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if hasattr(module, "bias") and module.bias is not None: init.zeros_(module.bias) elif isinstance(module, nn.LayerNorm): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, VisualBertLMPredictionHead): init.zeros_(module.bias) elif isinstance(module, VisualBertEmbeddings): init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1))) @dataclass @auto_docstring( custom_intro=""" Output type of [`VisualBertForPreTraining`]. """ ) class VisualBertForPreTrainingOutput(ModelOutput): r""" loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the sentence-image prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the sentence-image prediction (classification) head (scores of True/False continuation before SoftMax). """ loss: Optional[torch.FloatTensor] = None prediction_logits: Optional[torch.FloatTensor] = None seq_relationship_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @auto_docstring( custom_intro=""" The model can behave as an encoder (with only self-attention) following the architecture described in [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
""" ) class VisualBertModel(VisualBertPreTrainedModel): def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = VisualBertEmbeddings(config) self.encoder = VisualBertEncoder(config) self.pooler = VisualBertPooler(config) if add_pooling_layer else None self.bypass_transformer = config.bypass_transformer if self.bypass_transformer: self.additional_layer = VisualBertLayer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, visual_embeds: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.LongTensor] = None, visual_token_type_ids: Optional[torch.LongTensor] = None, image_text_alignment: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]: r""" visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*): The embedded representation of the visual inputs, generally derived using using an object detector. visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Segment token indices to indicate different portions of the visual embeds. [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the *visual_token_type_ids* to *1* for all tokens. image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*): Image-Text alignment uses to decide the position IDs of the visual embeddings. Example: ```python # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image. 
from transformers import AutoTokenizer, VisualBertModel import torch tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre") inputs = tokenizer("The capital of France is Paris.", return_tensors="pt") visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update( { "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } ) outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if visual_embeds is not None: visual_input_shape = visual_embeds.size()[:-1] if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if visual_embeds is not None and visual_attention_mask is None: visual_attention_mask = torch.ones(visual_input_shape, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
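        # Illustrative shape note: `attention_mask` is (batch_size, seq_length) and `visual_attention_mask`
        # is (batch_size, visual_seq_length); concatenating them along the last dimension gives a single
        # (batch_size, seq_length + visual_seq_length) mask covering both text and visual tokens, which
        # `get_extended_attention_mask` then broadcasts to (batch_size, 1, 1, combined_length) with large
        # negative values at the masked positions.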
if visual_embeds is not None: combined_attention_mask = torch.cat((attention_mask, visual_attention_mask), dim=-1) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( combined_attention_mask, (batch_size, input_shape + visual_input_shape) ) else: extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, (batch_size, input_shape) ) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, visual_embeds=visual_embeds, visual_token_type_ids=visual_token_type_ids, image_text_alignment=image_text_alignment, ) if self.bypass_transformer and visual_embeds is not None: text_length = input_ids.size(1) text_embedding_output = embedding_output[:, :text_length, :] visual_embedding_output = embedding_output[:, text_length:, :] text_extended_attention_mask = extended_attention_mask[:, :, text_length, :text_length] encoded_outputs = self.encoder( text_embedding_output, attention_mask=text_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoded_outputs[0] concatenated_input = torch.cat((sequence_output, visual_embedding_output), dim=1) sequence_output = self.additional_layer(concatenated_input, extended_attention_mask) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None else: encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" VisualBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `sentence-image prediction (classification)` head. 
""" ) class VisualBertForPreTraining(VisualBertPreTrainedModel): _tied_weights_keys = { "cls.predictions.decoder.bias": "cls.predictions.bias", "cls.predictions.decoder.weight": "visual_bert.embeddings.word_embeddings.weight", } def __init__(self, config): super().__init__(config) self.visual_bert = VisualBertModel(config) self.cls = VisualBertPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, visual_embeds: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.LongTensor] = None, visual_token_type_ids: Optional[torch.LongTensor] = None, image_text_alignment: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, sentence_image_labels: Optional[torch.LongTensor] = None, **kwargs, ) -> Union[tuple[torch.Tensor], VisualBertForPreTrainingOutput]: r""" visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*): The embedded representation of the visual inputs, generally derived using using an object detector. visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Segment token indices to indicate different portions of the visual embeds. [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the *visual_token_type_ids* to *1* for all tokens. image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*): Image-Text alignment uses to decide the position IDs of the visual embeddings. labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*):
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
true
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VisualBert checkpoint.""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) rename_keys_prefix = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] ACCEPTABLE_CHECKPOINTS = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def load_state_dict(checkpoint_path): sd = torch.load(checkpoint_path, map_location="cpu", weights_only=True) return sd def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix): new_d = OrderedDict() new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1)) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue new_key = key for name_pair in rename_keys_prefix: new_key = new_key.replace(name_pair[0], name_pair[1]) new_d[new_key] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our VisualBERT structure. """ assert checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS, ( f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}." 
) # Get Config if "pre" in checkpoint_path: model_type = "pretraining" if "vcr" in checkpoint_path: config_params = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: config_params = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: config_params = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: config_params = {"visual_embedding_dim": 1024} else: raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.") else: if "vcr" in checkpoint_path: config_params = {"visual_embedding_dim": 512} model_type = "multichoice" elif "vqa_advanced" in checkpoint_path: config_params = {"visual_embedding_dim": 2048} model_type = "vqa_advanced" elif "vqa" in checkpoint_path: config_params = {"visual_embedding_dim": 2048, "num_labels": 3129} model_type = "vqa" elif "nlvr" in checkpoint_path: config_params = { "visual_embedding_dim": 1024, "num_labels": 2, } model_type = "nlvr" config = VisualBertConfig(**config_params) # Load State Dict state_dict = load_state_dict(checkpoint_path) new_state_dict = get_new_dict(state_dict, config) if model_type == "pretraining": model = VisualBertForPreTraining(config) elif model_type == "vqa": model = VisualBertForQuestionAnswering(config) elif model_type == "nlvr": model = VisualBertForVisualReasoning(config) elif model_type == "multichoice": model = VisualBertForMultipleChoice(config) model.load_state_dict(new_state_dict) # Save Checkpoints Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") args = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
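
# Example invocation (illustrative; the local paths are placeholders, but the checkpoint filename must be one
# of ACCEPTABLE_CHECKPOINTS):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       /path/to/vqa_coco_pre_trained.th /path/to/visualbert-vqa-output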
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false
huggingface/transformers
https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/visual_bert/__init__.py
src/transformers/models/visual_bert/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_visual_bert import * from .modeling_visual_bert import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
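
# Usage sketch (illustrative, not part of the module): because of `_LazyModule`, the exports declared in
# `configuration_visual_bert` and `modeling_visual_bert` are resolved lazily, so e.g.
#
#   from transformers.models.visual_bert import VisualBertConfig, VisualBertModel
#
# only imports the corresponding submodules when those names are first accessed.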
python
Apache-2.0
a7f29523361b2cc12e51c1f5133d95f122f6f45c
2026-01-04T14:38:15.407064Z
false