Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full list of changes.
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__init__.py +102 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py +141 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py +243 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py +33 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py +320 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py +559 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py +566 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/__init__.py +64 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/configuration_cpmant.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/modeling_cpmant.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/configuration_cpmant.py +123 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/modeling_cpmant.py +879 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/tokenization_cpmant.py +277 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py +111 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py +220 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py +536 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__init__.py +97 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_dalle_to_flava_codebook.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_flava_original_pytorch_to_hf.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/feature_extraction_flava.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/image_processing_flava.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/modeling_flava.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/processing_flava.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/configuration_flava.py +764 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/convert_dalle_to_flava_codebook.py +102 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/convert_flava_original_pytorch_to_hf.py +99 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/feature_extraction_flava.py +33 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/image_processing_flava.py +694 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/modeling_flava.py +2099 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/processing_flava.py +164 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py +110 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc +0 -0
- evalkit_tf433/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc +0 -0
evalkit_tf433/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc
ADDED
Binary file (5.44 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc
ADDED
Binary file (13 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc
ADDED
Binary file (24 kB)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__init__.py
ADDED
@@ -0,0 +1,102 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
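Editorial aside (not part of the diff): the `_LazyModule` pattern above defers the heavy torch/TF imports until a symbol is first accessed. A minimal usage sketch, assuming a standard `transformers` installation with PyTorch available:

```python
# Sketch: top-level imports resolve lazily through
# transformers/models/convnext/__init__.py, so torch is only imported when
# a torch-backed symbol is actually touched.
from transformers import ConvNextConfig  # config import needs no torch/TF

config = ConvNextConfig()
print(config.model_type)  # "convnext"

# Accessing a torch-backed class triggers the lazy import of modeling_convnext.
from transformers import ConvNextModel  # requires torch to be installed

model = ConvNextModel(config)  # randomly initialised weights
```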
evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.58 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc
ADDED
Binary file (5.91 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc
ADDED
Binary file (7.13 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc
ADDED
Binary file (1.02 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc
ADDED
Binary file (13.1 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc
ADDED
Binary file (18.2 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc
ADDED
Binary file (18.8 kB)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py
ADDED
@@ -0,0 +1,141 @@
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ConvNeXT model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnext-tiny-224": "https://huggingface.co/facebook/convnext-tiny-224/resolve/main/config.json",
    # See all ConvNeXT models at https://huggingface.co/models?filter=convnext
}


class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate an
    ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the ConvNeXT
    [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, optional, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, optional, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`List[int]`, *optional*, defaults to [96, 192, 384, 768]):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to [3, 3, 9, 3]):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
            The initial value for the layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage.

    Example:
    ```python
    >>> from transformers import ConvNextConfig, ConvNextModel

    >>> # Initializing a ConvNext convnext-tiny-224 style configuration
    >>> configuration = ConvNextConfig()

    >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
    >>> model = ConvNextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "convnext"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        layer_scale_init_value=1e-6,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ConvNextOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
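Editorial aside (not part of the diff): the `out_features`/`out_indices` arguments wired through `get_aligned_output_features_output_indices` are what let this config drive `ConvNextBackbone`. A small sketch, assuming PyTorch is installed:

```python
# Sketch: configuring ConvNeXT as a feature backbone. For the default
# 4-stage model the stage names are ["stem", "stage1", ..., "stage4"].
from transformers import ConvNextConfig, ConvNextBackbone

config = ConvNextConfig(out_features=["stage2", "stage4"])
print(config.out_features)  # ["stage2", "stage4"]
print(config.out_indices)   # aligned automatically to the matching stage positions

backbone = ConvNextBackbone(config)  # randomly initialised weights
```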
evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py
ADDED
@@ -0,0 +1,243 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvNext checkpoints from the original repository.

URL: https://github.com/facebookresearch/ConvNeXt"""


import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_convnext_config(checkpoint_url):
    config = ConvNextConfig()

    if "tiny" in checkpoint_url:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
    if "large" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
    if "xlarge" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]

    if "1k" in checkpoint_url:
        num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        num_labels = 21841
        filename = "imagenet-22k-id2label.json"
        expected_shape = (1, 21841)

    repo_id = "huggingface/label-files"
    config.num_labels = num_labels
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    if "1k" not in checkpoint_url:
        # this dataset contains 21843 labels but the model only has 21841
        # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
        del id2label[9205]
        del id2label[15027]
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.hidden_sizes = hidden_sizes
    config.depths = depths

    return config, expected_shape


def rename_key(name):
    if "downsample_layers.0.0" in name:
        name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
    if "downsample_layers.0.1" in name:
        name = name.replace("downsample_layers.0.1", "embeddings.norm")  # we rename to layernorm later on
    if "downsample_layers.1.0" in name:
        name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
    if "downsample_layers.1.1" in name:
        name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
    if "downsample_layers.2.0" in name:
        name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
    if "downsample_layers.2.1" in name:
        name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
    if "downsample_layers.3.0" in name:
        name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
    if "downsample_layers.3.1" in name:
        name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
    if "stages" in name and "downsampling_layer" not in name:
        # stages.0.0. for instance should be renamed to stages.0.layers.0.
        name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
    if "stages" in name:
        name = name.replace("stages", "encoder.stages")
    if "norm" in name:
        name = name.replace("norm", "layernorm")
    if "gamma" in name:
        name = name.replace("gamma", "layer_scale_parameter")
    if "head" in name:
        name = name.replace("head", "classifier")

    return name


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ConvNext structure.
    """

    # define ConvNext configuration based on URL
    config, expected_shape = get_convnext_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # add prefix to all keys expect classifier head
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if not key.startswith("classifier"):
            key = "convnext." + key
        state_dict[key] = val

    # load HuggingFace model
    model = ConvNextForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image, prepared by ConvNextImageProcessor
    size = 224 if "224" in checkpoint_url else 384
    image_processor = ConvNextImageProcessor(size=size)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    logits = model(pixel_values).logits

    # note: the logits below were obtained without center cropping
    if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth":
        expected_logits = torch.tensor([-0.1210, -0.6605, 0.1918])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth":
        expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth":
        expected_logits = torch.tensor([0.4525, 0.7539, 0.0308])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth":
        expected_logits = torch.tensor([0.3561, 0.6350, -0.0384])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth":
        expected_logits = torch.tensor([0.4174, -0.0989, 0.1489])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth":
        expected_logits = torch.tensor([0.2513, -0.1349, -0.1613])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth":
        expected_logits = torch.tensor([1.2980, 0.3631, -0.1198])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth":
        expected_logits = torch.tensor([1.2963, 0.1227, 0.1723])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth":
        expected_logits = torch.tensor([1.7956, 0.8390, 0.2820])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth":
        expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth":
        expected_logits = torch.tensor([-0.5672, -0.0730, -0.4348])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth":
        expected_logits = torch.tensor([0.2681, 0.2365, 0.6246])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth":
        expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth":
        expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth":
        expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444])
    else:
        raise ValueError(f"Unknown URL: {checkpoint_url}")

    assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3)
    assert logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    print("Pushing model to the hub...")
    model_name = "convnext"
    if "tiny" in checkpoint_url:
        model_name += "-tiny"
    elif "small" in checkpoint_url:
        model_name += "-small"
    elif "base" in checkpoint_url:
        model_name += "-base"
    elif "xlarge" in checkpoint_url:
        model_name += "-xlarge"
    elif "large" in checkpoint_url:
        model_name += "-large"
    if "224" in checkpoint_url:
        model_name += "-224"
    elif "384" in checkpoint_url:
        model_name += "-384"
    if "22k" in checkpoint_url and "1k" not in checkpoint_url:
        model_name += "-22k"
    if "22k" in checkpoint_url and "1k" in checkpoint_url:
        model_name += "-22k-1k"

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
        organization="nielsr",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
        type=str,
        help="URL of the original ConvNeXT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )

    args = parser.parse_args()
    convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
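Editorial aside (not part of the diff): the conversion entry point above is normally run from the command line; the snippet below calls it programmatically instead. Note that, as written, the function downloads the original checkpoint over the network and ends with a Hub push, which will fail without credentials. The output folder name is only illustrative:

```python
# Sketch: running the checkpoint conversion from Python rather than the CLI.
# Needs network access; "./convnext-tiny-224-converted" is a hypothetical path,
# and the final push_to_hub step requires Hub credentials.
from transformers.models.convnext.convert_convnext_to_pytorch import convert_convnext_checkpoint

convert_convnext_checkpoint(
    checkpoint_url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
    pytorch_dump_folder_path="./convnext-tiny-224-converted",
)

# The saved folder can then be loaded like any other local checkpoint.
from transformers import ConvNextForImageClassification

model = ConvNextForImageClassification.from_pretrained("./convnext-tiny-224-converted")
```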
evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py
ADDED
@@ -0,0 +1,33 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for ConvNeXT."""

import warnings

from ...utils import logging
from .image_processing_convnext import ConvNextImageProcessor


logger = logging.get_logger(__name__)


class ConvNextFeatureExtractor(ConvNextImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ConvNextImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
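Editorial aside (not part of the diff): the shim above only exists to keep old `ConvNextFeatureExtractor` code working while steering users to `ConvNextImageProcessor`. A sketch of how the deprecation surfaces in practice:

```python
# Sketch: the deprecated class still works but emits a FutureWarning;
# new code should construct ConvNextImageProcessor directly.
import warnings

from transformers import ConvNextFeatureExtractor, ConvNextImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = ConvNextFeatureExtractor()  # triggers the FutureWarning above

print(any(issubclass(w.category, FutureWarning) for w in caught))  # True

processor = ConvNextImageProcessor()  # preferred replacement
```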
evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py
ADDED
@@ -0,0 +1,320 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for ConvNeXT."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Constructs a ConvNeXT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overriden
            by `do_resize` in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
            Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
            resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
            be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to
            `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
            be overriden by `size` in the `preprocess` method.
        crop_pct (`float` *optional*, defaults to 224 / 256):
            Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be
            overriden by `crop_pct` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overriden by `resample` in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overriden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overriden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
                `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
                Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
                after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
            crop_pct (`float`):
                Percentage of the image to crop. Only has an effect if size < 384.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred from the input
                image.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(
                image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format
            )
            image = resize(
                image=image,
                size=resize_size,
                resample=resample,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(
                image=image,
                size=(shortest_edge, shortest_edge),
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image,
                size=(shortest_edge, shortest_edge),
                resample=resample,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
                is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
                image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
                `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
            crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
                Percentage of the image to crop if size < 384.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_resize:
            images = [
                self.resize(
                    image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
                )
                for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
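Editorial aside (not part of the diff): for `shortest_edge` below 384 the processor resizes the short side to `int(shortest_edge / crop_pct)` and then center-crops back to a square, while at 384 and above it warps straight to `(shortest_edge, shortest_edge)` with no crop. A minimal sketch with a synthetic image, assuming NumPy and the vision extras are installed:

```python
# Sketch: ConvNeXT preprocessing at the default crop_pct of 224/256.
# 224 / (224/256) = 256, so the short side is resized to 256 before the
# 224x224 center crop; with shortest_edge >= 384 no crop would be applied.
import numpy as np
from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 224})
dummy = np.random.randint(0, 256, (500, 375, 3), dtype=np.uint8)  # H, W, C

batch = processor(images=dummy, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224), channels-first
```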
evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py
ADDED
@@ -0,0 +1,559 @@
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ConvNext model."""


from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_convnext import ConvNextConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ConvNextConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/convnext-tiny-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/convnext-tiny-224",
    # See all ConvNext models at https://huggingface.co/models?filter=convnext
]


# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNext
class ConvNextDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class ConvNextLayerNorm(nn.Module):
    r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
    width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError(f"Unsupported data format: {self.data_format}")
        self.normalized_shape = (normalized_shape,)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.data_format == "channels_last":
            x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            input_dtype = x.dtype
            x = x.float()
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = x.to(dtype=input_dtype)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x


class ConvNextEmbeddings(nn.Module):
    """This class is comparable to (and inspired by) the SwinEmbeddings class
    found in src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config):
        super().__init__()
        self.patch_embeddings = nn.Conv2d(
            config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
        )
        self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
        self.num_channels = config.num_channels

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings


class ConvNextLayer(nn.Module):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,
    H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
        self.layernorm = ConvNextLayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise/1x1 convs, implemented with linear layers
        self.act = ACT2FN[config.hidden_act]
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.layer_scale_parameter = (
            nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
            if config.layer_scale_init_value > 0
            else None
        )
        self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)

        x = input + self.drop_path(x)
        return x


class ConvNextStage(nn.Module):
    """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
    """

    def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
        super().__init__()

        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = nn.Sequential(
                ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
                nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
            )
        else:
            self.downsampling_layer = nn.Identity()
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = nn.Sequential(
            *[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
        )

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.downsampling_layer(hidden_states)
        hidden_states = self.layers(hidden_states)
        return hidden_states


class ConvNextEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList()
        drop_path_rates = [
            x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
        ]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = ConvNextStage(
                config,
                in_channels=prev_chs,
                out_channels=out_chs,
                stride=2 if i > 0 else 1,
                depth=config.depths[i],
                drop_path_rates=drop_path_rates[i],
            )
            self.stages.append(stage)
            prev_chs = out_chs

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            hidden_states = layer_module(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
        )


class ConvNextPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvNextConfig
    base_model_prefix = "convnext"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ConvNextEncoder):
            module.gradient_checkpointing = value


CONVNEXT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CONVNEXT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ConvNext model outputting raw features without any specific head on top.",
    CONVNEXT_START_DOCSTRING,
)
class ConvNextModel(ConvNextPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = ConvNextEmbeddings(config)
        self.encoder = ConvNextEncoder(config)

        # final layernorm layer
        self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]

        # global average pooling, (N, C, H, W) -> (N, C)
        pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    CONVNEXT_START_DOCSTRING,
)
class ConvNextForImageClassification(ConvNextPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.convnext = ConvNextModel(config)

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    CONVNEXT_START_DOCSTRING,
)
class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.embeddings = ConvNextEmbeddings(config)
        self.encoder = ConvNextEncoder(config)
        self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes

        # Add layer norms to hidden states of out_features
        hidden_states_norms = {}
        for stage, num_channels in zip(self._out_features, self.channels):
            hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format="channels_first")
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output,
            output_hidden_states=True,
            return_dict=True,
        )

        hidden_states = outputs.hidden_states

        feature_maps = ()
        # we skip the stem
        for idx, (stage, hidden_state) in enumerate(zip(self.stage_names[1:], hidden_states[1:])):
            if stage in self.out_features:
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                feature_maps += (hidden_state,)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
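
A small illustration of how ConvNextEncoder distributes the stochastic-depth rate over blocks (not part of the added file; the depths and rate below are example values, not checkpoint defaults):

# The encoder spreads drop_path_rate linearly over all blocks, then splits the
# schedule per stage, so later blocks get larger drop probabilities.
import torch

depths = [3, 3, 9, 3]   # example block counts per stage
drop_path_rate = 0.1    # example config value

rates = torch.linspace(0, drop_path_rate, sum(depths)).split(depths)
per_stage = [r.tolist() for r in rates]
print([len(r) for r in per_stage])  # [3, 3, 9, 3]
print(per_stage[0][0], per_stage[-1][-1])  # 0.0 ... 0.1
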
evalkit_tf433/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py
ADDED
@@ -0,0 +1,566 @@
# coding=utf-8
# Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ConvNext model."""


from __future__ import annotations

from typing import Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
from ...modeling_tf_utils import (
    TFModelInputType,
    TFPreTrainedModel,
    TFSequenceClassificationLoss,
    get_initializer,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import shape_list
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_convnext import ConvNextConfig


logger = logging.get_logger(__name__)


_CONFIG_FOR_DOC = "ConvNextConfig"
_CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"


class TFConvNextDropPath(tf.keras.layers.Layer):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    References:
        (1) github.com:rwightman/pytorch-image-models
    """

    def __init__(self, drop_path, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x, training=None):
        if training:
            keep_prob = 1 - self.drop_path
            shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
            random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
            random_tensor = tf.floor(random_tensor)
            return (x / keep_prob) * random_tensor
        return x


class TFConvNextEmbeddings(tf.keras.layers.Layer):
    """This class is comparable to (and inspired by) the SwinEmbeddings class
    found in src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.patch_embeddings = tf.keras.layers.Conv2D(
            filters=config.hidden_sizes[0],
            kernel_size=config.patch_size,
            strides=config.patch_size,
            name="patch_embeddings",
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
        )
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
        self.num_channels = config.num_channels

    def call(self, pixel_values):
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values["pixel_values"]

        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))

        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings


class TFConvNextLayer(tf.keras.layers.Layer):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,
    H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
    NHWC ordering, we can just apply the operations straight-away without the permutation.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        self.dwconv = tf.keras.layers.Conv2D(
            filters=dim,
            kernel_size=7,
            padding="same",
            groups=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="dwconv",
        )  # depthwise conv
        self.layernorm = tf.keras.layers.LayerNormalization(
            epsilon=1e-6,
            name="layernorm",
        )
        self.pwconv1 = tf.keras.layers.Dense(
            units=4 * dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv1",
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = get_tf_activation(config.hidden_act)
        self.pwconv2 = tf.keras.layers.Dense(
            units=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv2",
        )
        # Using `layers.Activation` instead of `tf.identity` to better control `training`
        # behaviour.
        self.drop_path = (
            TFConvNextDropPath(drop_path, name="drop_path")
            if drop_path > 0.0
            else tf.keras.layers.Activation("linear", name="drop_path")
        )

    def build(self, input_shape: tf.TensorShape = None):
        # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
        self.layer_scale_parameter = (
            self.add_weight(
                shape=(self.dim,),
                initializer=tf.keras.initializers.Constant(value=self.config.layer_scale_init_value),
                trainable=True,
                name="layer_scale_parameter",
            )
            if self.config.layer_scale_init_value > 0
            else None
        )
        super().build(input_shape)

    def call(self, hidden_states, training=False):
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)

        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x

        x = input + self.drop_path(x, training=training)
        return x


class TFConvNextStage(tf.keras.layers.Layer):
    """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
    """

    def __init__(
        self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None, **kwargs
    ):
        super().__init__(**kwargs)
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = [
                tf.keras.layers.LayerNormalization(
                    epsilon=1e-6,
                    name="downsampling_layer.0",
                ),
                # Inputs to this layer will follow NHWC format since we
                # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
                # layer. All the outputs throughout the model will be in NHWC
                # from this point on until the output where we again change to
                # NCHW.
                tf.keras.layers.Conv2D(
                    filters=out_channels,
                    kernel_size=kernel_size,
                    strides=stride,
                    kernel_initializer=get_initializer(config.initializer_range),
                    bias_initializer="zeros",
                    name="downsampling_layer.1",
                ),
            ]
        else:
            self.downsampling_layer = [tf.identity]

        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = [
            TFConvNextLayer(
                config,
                dim=out_channels,
                drop_path=drop_path_rates[j],
                name=f"layers.{j}",
            )
            for j in range(depth)
        ]

    def call(self, hidden_states):
        for layer in self.downsampling_layer:
            hidden_states = layer(hidden_states)
        for layer in self.layers:
            hidden_states = layer(hidden_states)
        return hidden_states


class TFConvNextEncoder(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
        drop_path_rates = tf.split(drop_path_rates, config.depths)
        drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = TFConvNextStage(
                config,
                in_channels=prev_chs,
                out_channels=out_chs,
                stride=2 if i > 0 else 1,
                depth=config.depths[i],
                drop_path_rates=drop_path_rates[i],
                name=f"stages.{i}",
            )
            self.stages.append(stage)
            prev_chs = out_chs

    def call(self, hidden_states, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            hidden_states = layer_module(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


@keras_serializable
class TFConvNextMainLayer(tf.keras.layers.Layer):
    config_class = ConvNextConfig

    def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embeddings = TFConvNextEmbeddings(config, name="embeddings")
        self.encoder = TFConvNextEncoder(config, name="encoder")
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
        # We are setting the `data_format` like so because from here on we will revert to the
        # NCHW output format
        self.pooler = tf.keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None

    @unpack_inputs
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        last_hidden_state = encoder_outputs[0]
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = self.layernorm(self.pooler(last_hidden_state))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            hidden_states = hidden_states if output_hidden_states else ()
            return (last_hidden_state, pooled_output) + hidden_states

        return TFBaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFConvNextPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvNextConfig
    base_model_prefix = "convnext"
    main_input_name = "pixel_values"


CONVNEXT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
      `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
      `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

CONVNEXT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
"""


@add_start_docstrings(
    "The bare ConvNext model outputting raw features without any specific head on top.",
    CONVNEXT_START_DOCSTRING,
)
class TFConvNextModel(TFConvNextPreTrainedModel):
    def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, TFConvNextModel
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        outputs = self.convnext(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPooling(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    CONVNEXT_START_DOCSTRING,
)
class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels
        self.convnext = TFConvNextMainLayer(config, name="convnext")

        # Classifier head
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="classifier",
        )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification
        >>> import tensorflow as tf
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
        >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        outputs = self.convnext(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
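
A small illustration of the layout juggling the TF port above relies on (not part of the added file; shapes are example values):

# Inputs arrive as NCHW to mirror the PyTorch processor output, are transposed
# to NHWC for tf.keras.layers.Conv2D, and the final hidden state is transposed
# back to NCHW for parity with the PyTorch model.
import tensorflow as tf

pixel_values = tf.zeros((1, 3, 224, 224))                  # NCHW
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))       # NHWC for Keras convolutions
print(nhwc.shape)                                          # (1, 224, 224, 3)

last_hidden_state = tf.zeros((1, 7, 7, 768))               # NHWC feature map (example shape)
nchw = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))  # back to NCHW
print(nchw.shape)                                          # (1, 768, 7, 7)
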
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/__init__.py
ADDED
@@ -0,0 +1,64 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
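For orientation, a short sketch (not part of the file above) of what this `_LazyModule` setup gives the user: submodules are only imported when one of the exported names is actually accessed, and the torch-only modeling classes are registered in `_import_structure` only when `is_torch_available()` returns `True`.

```python
# Accessing the exported names triggers the lazy import machinery in _LazyModule;
# nothing heavy is loaded at `import transformers` time.
from transformers import CpmAntConfig, CpmAntTokenizer  # noqa: F401

config = CpmAntConfig()
print(config.model_type)  # "cpmant"

# The modeling classes (CpmAntModel, CpmAntForCausalLM, ...) are added to
# `_import_structure` only when PyTorch is available, so the config and
# tokenizer remain usable on installs without PyTorch.
```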
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.01 kB)
|
|
|
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/configuration_cpmant.cpython-310.pyc
ADDED
|
Binary file (4.74 kB)
|
|
|
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/modeling_cpmant.cpython-310.pyc
ADDED
|
Binary file (29.5 kB)
|
|
|
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/configuration_cpmant.py
ADDED
|
@@ -0,0 +1,123 @@
# coding=utf-8
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CPMAnt model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/config.json"
    # See all CPMAnt models at https://huggingface.co/models?filter=cpmant
}


class CpmAntConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CpmAntModel`]. It is used to instantiate a
    CPMAnt model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CPMAnt
    [openbmb/cpm-ant-10b](https://huggingface.co/openbmb/cpm-ant-10b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30720):
            Vocabulary size of the CPMAnt model. Defines the number of different tokens that can be represented by the
            `input` passed when calling [`CpmAntModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the encoder layers.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads in the Transformer encoder.
        dim_head (`int`, *optional*, defaults to 128):
            Dimension of attention heads for each attention layer in the Transformer encoder.
        dim_ff (`int`, *optional*, defaults to 10240):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of layers of the Transformer encoder.
        dropout_p (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        position_bias_num_buckets (`int`, *optional*, defaults to 512):
            The number of position_bias buckets.
        position_bias_max_distance (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        prompt_types (`int`, *optional*, defaults to 32):
            The number of prompt types.
        prompt_length (`int`, *optional*, defaults to 32):
            The length of the prompt.
        segment_types (`int`, *optional*, defaults to 32):
            The number of segment types.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to use cache.
        init_std (`float`, *optional*, defaults to 1.0):
            Initialize parameters with std = init_std.

    Example:

    ```python
    >>> from transformers import CpmAntModel, CpmAntConfig

    >>> # Initializing a CPMAnt cpm-ant-10b style configuration
    >>> configuration = CpmAntConfig()

    >>> # Initializing a model from the cpm-ant-10b style configuration
    >>> model = CpmAntModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "cpmant"

    def __init__(
        self,
        vocab_size: int = 30720,
        hidden_size: int = 4096,
        num_attention_heads: int = 32,
        dim_head: int = 128,
        dim_ff: int = 10240,
        num_hidden_layers: int = 48,
        dropout_p: float = 0.0,
        position_bias_num_buckets: int = 512,
        position_bias_max_distance: int = 2048,
        eps: float = 1e-6,
        init_std: float = 1.0,
        prompt_types: int = 32,
        prompt_length: int = 32,
        segment_types: int = 32,
        use_cache: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.prompt_types = prompt_types
        self.prompt_length = prompt_length
        self.segment_types = segment_types
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.dim_head = dim_head
        self.dim_ff = dim_ff
        self.num_hidden_layers = num_hidden_layers
        self.position_bias_num_buckets = position_bias_num_buckets
        self.position_bias_max_distance = position_bias_max_distance
        self.dropout_p = dropout_p
        self.eps = eps
        self.use_cache = use_cache
        self.vocab_size = vocab_size
        self.init_std = init_std
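Since the defaults above describe the 10B-parameter checkpoint, a common pattern is to instantiate a much smaller configuration for quick experiments or tests. A minimal sketch; the reduced values are illustrative assumptions, not recommended settings:

```python
from transformers import CpmAntConfig, CpmAntModel

tiny_config = CpmAntConfig(
    vocab_size=1024,        # defaults above are sized for cpm-ant-10b
    hidden_size=128,
    num_attention_heads=4,
    dim_head=32,
    dim_ff=256,
    num_hidden_layers=2,
)
model = CpmAntModel(tiny_config)  # randomly initialized, no pretrained weights
print(sum(p.numel() for p in model.parameters()))
```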
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/modeling_cpmant.py
ADDED
|
@@ -0,0 +1,879 @@
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" PyTorch CPMAnt"""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import math
|
| 19 |
+
from typing import List, Optional, Tuple, Union
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
import torch.utils.checkpoint
|
| 24 |
+
from torch import nn
|
| 25 |
+
from torch.nn import CrossEntropyLoss
|
| 26 |
+
|
| 27 |
+
from ...activations import ACT2FN
|
| 28 |
+
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
|
| 29 |
+
from ...modeling_utils import PreTrainedModel
|
| 30 |
+
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
|
| 31 |
+
from .configuration_cpmant import CpmAntConfig
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
logger = logging.get_logger(__name__)
|
| 35 |
+
|
| 36 |
+
_CHECKPOINT_FOR_DOC = "openbmb/cpm-ant-10b"
|
| 37 |
+
_CONFIG_FOR_DOC = "CpmAntConfig"
|
| 38 |
+
|
| 39 |
+
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
| 40 |
+
"openbmb/cpm-ant-10b",
|
| 41 |
+
# See all CPMAnt models at https://huggingface.co/models?filter=cpmant
|
| 42 |
+
]
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class CpmAntLayerNorm(nn.Module):
|
| 46 |
+
"""
|
| 47 |
+
We use Root Mean Square (RMS) Layer Normalization; see https://arxiv.org/abs/1910.07467 for details.
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
def __init__(self, config: CpmAntConfig):
|
| 51 |
+
super().__init__()
|
| 52 |
+
|
| 53 |
+
self.eps = config.eps
|
| 54 |
+
self.dim_norm = config.hidden_size
|
| 55 |
+
self.weight = nn.Parameter(torch.empty(config.hidden_size))
|
| 56 |
+
|
| 57 |
+
def forward(self, hidden_states: torch.Tensor):
|
| 58 |
+
"""
|
| 59 |
+
Args:
|
| 60 |
+
hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)
|
| 61 |
+
"""
|
| 62 |
+
if hidden_states.size(-1) != self.dim_norm:
|
| 63 |
+
raise AssertionError("hidden_states.size(-1) != self.dim_norm")
|
| 64 |
+
old_dtype = hidden_states.dtype
|
| 65 |
+
variance = hidden_states.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
|
| 66 |
+
hidden_states = (hidden_states * torch.rsqrt(variance + self.eps)).to(old_dtype) * self.weight
|
| 67 |
+
return hidden_states
|
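`CpmAntLayerNorm` above is an RMSNorm: it rescales each position by the root mean square of its features instead of subtracting a mean as standard LayerNorm does. A standalone sketch of the same computation for intuition (the function name is mine, not from the file):

```python
import torch

def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Mean of squares over the feature dimension; no mean subtraction, no bias.
    variance = x.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
    return (x * torch.rsqrt(variance + eps)).to(x.dtype) * weight

x = torch.randn(2, 5, 8)
out = rms_norm(x, torch.ones(8))
print(out.pow(2).mean(-1))  # per-position mean square is ~1.0 when weight is all ones
```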
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class CpmAntAttention(nn.Module):
|
| 71 |
+
def __init__(self, config: CpmAntConfig):
|
| 72 |
+
super().__init__()
|
| 73 |
+
self.dim_model = config.hidden_size
|
| 74 |
+
self.num_heads = config.num_attention_heads
|
| 75 |
+
self.dim_head = config.dim_head
|
| 76 |
+
|
| 77 |
+
self.project_q = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
|
| 78 |
+
self.project_k = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
|
| 79 |
+
self.project_v = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
|
| 80 |
+
|
| 81 |
+
self.attention_out = nn.Linear(self.num_heads * self.dim_head, self.dim_model, bias=False)
|
| 82 |
+
|
| 83 |
+
self.softmax = torch.nn.Softmax(dim=-1)
|
| 84 |
+
|
| 85 |
+
if config.dropout_p is not None:
|
| 86 |
+
self.dropout = torch.nn.Dropout(p=config.dropout_p)
|
| 87 |
+
else:
|
| 88 |
+
self.dropout = None
|
| 89 |
+
|
| 90 |
+
def forward(
|
| 91 |
+
self,
|
| 92 |
+
hidden_q: torch.Tensor,
|
| 93 |
+
hidden_kv: torch.Tensor,
|
| 94 |
+
attention_mask: torch.BoolTensor,
|
| 95 |
+
position_bias: torch.Tensor,
|
| 96 |
+
output_attentions: Optional[bool] = False,
|
| 97 |
+
past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 98 |
+
use_cache: Optional[bool] = None,
|
| 99 |
+
):
|
| 100 |
+
"""
|
| 101 |
+
Args:
|
| 102 |
+
hidden_q (`torch.Tensor`):
|
| 103 |
+
Input of transformer block(self-attention block). It can be the raw embedding of a batch of sequences.
|
| 104 |
+
hidden_kv (`torch.Tensor` of shape `(batch, len_k, dim_model)`):
|
| 105 |
+
Hidden states used to compute the *key* and *value* projections, of shape `(batch, len_k, dim_model)`.
|
| 106 |
+
attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
|
| 107 |
+
Mask that prevents invalid positions from taking part in the self-attention computation.
|
| 108 |
+
position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
|
| 109 |
+
Provide positional information to self-attention block.
|
| 110 |
+
output_attentions (`bool`, *optional*):
|
| 111 |
+
Whether or not to return the attentions tensors of all attention layers.
|
| 112 |
+
past_key_values (`Tuple[torch.Tensor, torch.Tensor]`, *optional*):
|
| 113 |
+
Cached past key and value projection states.
|
| 114 |
+
use_cache (`bool`, *optional*):
|
| 115 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 116 |
+
(see `past_key_values`).
|
| 117 |
+
"""
|
| 118 |
+
batch_size = hidden_q.size(0)
|
| 119 |
+
len_q = hidden_q.size(1)
|
| 120 |
+
len_k = hidden_kv.size(1)
|
| 121 |
+
|
| 122 |
+
query = self.project_q(hidden_q)
|
| 123 |
+
key = self.project_k(hidden_kv)
|
| 124 |
+
value = self.project_v(hidden_kv)
|
| 125 |
+
|
| 126 |
+
query = query.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
|
| 127 |
+
key = key.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
|
| 128 |
+
value = value.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
|
| 129 |
+
|
| 130 |
+
if past_key_values is not None:
|
| 131 |
+
key = torch.cat([past_key_values[0], key], dim=-2)
|
| 132 |
+
value = torch.cat([past_key_values[1], value], dim=-2)
|
| 133 |
+
len_k = key.size(-2)
|
| 134 |
+
|
| 135 |
+
# (batch_size, num_heads, len_q, dim_head) @ (batch_size, num_heads, dim_head, len_k) -> (batch_size, num_heads, len_q, len_k)
|
| 136 |
+
score = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(self.dim_head)
|
| 137 |
+
score = score + position_bias
|
| 138 |
+
|
| 139 |
+
score = torch.masked_fill(
|
| 140 |
+
score,
|
| 141 |
+
attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False),
|
| 142 |
+
torch.scalar_tensor(float("-inf"), device=score.device, dtype=score.dtype),
|
| 143 |
+
)
|
| 144 |
+
score = self.softmax(score)
|
| 145 |
+
|
| 146 |
+
score = torch.masked_fill(
|
| 147 |
+
score,
|
| 148 |
+
attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False),
|
| 149 |
+
torch.scalar_tensor(0, device=score.device, dtype=score.dtype),
|
| 150 |
+
)
|
| 151 |
+
if output_attentions:
|
| 152 |
+
attn_weights = score
|
| 153 |
+
else:
|
| 154 |
+
attn_weights = None
|
| 155 |
+
|
| 156 |
+
if self.dropout is not None:
|
| 157 |
+
score = self.dropout(score)
|
| 158 |
+
|
| 159 |
+
# (batch_size, num_heads, len_q, len_k) @ (batch_size, num_heads, len_k, dim_head) -> (batch_size, num_heads, len_q, dim_head)
|
| 160 |
+
score = torch.matmul(score, value)
|
| 161 |
+
|
| 162 |
+
score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3)
|
| 163 |
+
score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head)
|
| 164 |
+
|
| 165 |
+
score = self.attention_out(score)
|
| 166 |
+
|
| 167 |
+
past_key_values = None
|
| 168 |
+
if use_cache:
|
| 169 |
+
past_key_values = (key, value)
|
| 170 |
+
|
| 171 |
+
return score, attn_weights, past_key_values
|
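The double `masked_fill` in the forward pass above is easy to miss: masked positions are set to `-inf` before the softmax so they receive zero probability, and zeroed again afterwards so that rows where every position is masked produce zeros rather than NaNs. A toy sketch of that pattern (values are illustrative):

```python
import torch

scores = torch.tensor([[1.0, 2.0, 3.0]])
mask = torch.tensor([[True, True, False]])   # last position is not attendable

scores = scores.masked_fill(~mask, float("-inf"))
probs = torch.softmax(scores, dim=-1)
probs = probs.masked_fill(~mask, 0.0)        # also guards fully-masked rows
print(probs)                                 # tensor([[0.2689, 0.7311, 0.0000]])
```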
| 172 |
+
|
| 173 |
+
|
| 174 |
+
class CpmAntSelfAttentionBlock(nn.Module):
|
| 175 |
+
def __init__(self, config: CpmAntConfig):
|
| 176 |
+
super().__init__()
|
| 177 |
+
self.layernorm_before_attention = CpmAntLayerNorm(config)
|
| 178 |
+
self.self_attention = CpmAntAttention(config)
|
| 179 |
+
if config.dropout_p:
|
| 180 |
+
self.dropout = torch.nn.Dropout(config.dropout_p)
|
| 181 |
+
else:
|
| 182 |
+
self.dropout = None
|
| 183 |
+
|
| 184 |
+
def forward(
|
| 185 |
+
self,
|
| 186 |
+
hidden_states: torch.Tensor,
|
| 187 |
+
attention_mask: torch.Tensor,
|
| 188 |
+
position_bias: Optional[torch.Tensor] = None,
|
| 189 |
+
output_attentions: Optional[bool] = False,
|
| 190 |
+
past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 191 |
+
use_cache: Optional[bool] = None,
|
| 192 |
+
):
|
| 193 |
+
"""
|
| 194 |
+
Args:
|
| 195 |
+
hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):
|
| 196 |
+
Input of transformer block(self-attention block). It can be the raw embedding of a batch of sequences.
|
| 197 |
+
attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
|
| 198 |
+
Mask that prevents invalid positions from taking part in the self-attention computation.
|
| 199 |
+
position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
|
| 200 |
+
Provide positional information to self-attention block.
|
| 201 |
+
output_attentions (`bool`, *optional*):
|
| 202 |
+
Whether or not to return the attentions tensors of all attention layers.
|
| 203 |
+
past_key_values (`Tuple(torch.FloatTensor)`, *optional*):
|
| 204 |
+
Cached past key and value projection states.
|
| 205 |
+
use_cache (`bool`, *optional*):
|
| 206 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 207 |
+
(see `past_key_values`).
|
| 208 |
+
"""
|
| 209 |
+
outputs = self.layernorm_before_attention(hidden_states)
|
| 210 |
+
outputs = self.self_attention(
|
| 211 |
+
outputs, outputs, attention_mask, position_bias, output_attentions, past_key_values, use_cache
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
outputs, attn_weights, current_key_value = outputs
|
| 215 |
+
|
| 216 |
+
if self.dropout is not None:
|
| 217 |
+
outputs = self.dropout(outputs)
|
| 218 |
+
hidden_states = hidden_states + outputs
|
| 219 |
+
|
| 220 |
+
return hidden_states, attn_weights, current_key_value
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
class CpmAntDenseGatedACT(nn.Module):
|
| 224 |
+
def __init__(self, config: CpmAntConfig):
|
| 225 |
+
super().__init__()
|
| 226 |
+
self.w_0 = nn.Linear(config.hidden_size, config.dim_ff, bias=False)
|
| 227 |
+
self.w_1 = nn.Linear(config.hidden_size, config.dim_ff, bias=False)
|
| 228 |
+
self.act = torch.nn.GELU()
|
| 229 |
+
|
| 230 |
+
def forward(self, hidden_states: torch.Tensor):
|
| 231 |
+
"""Transform an input tensor from one feature space to another via a nonlinear operation
|
| 232 |
+
|
| 233 |
+
Args:
|
| 234 |
+
hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)
|
| 235 |
+
"""
|
| 236 |
+
gate_score = self.act(self.w_0(hidden_states))
|
| 237 |
+
hidden_states = self.w_1(hidden_states)
|
| 238 |
+
|
| 239 |
+
hidden_states = gate_score * hidden_states
|
| 240 |
+
return hidden_states
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
class CpmAntFeedForward(nn.Module):
|
| 244 |
+
def __init__(self, config: CpmAntConfig):
|
| 245 |
+
super().__init__()
|
| 246 |
+
self.w_in = CpmAntDenseGatedACT(config)
|
| 247 |
+
if config.dropout_p is not None:
|
| 248 |
+
self.dropout = torch.nn.Dropout(config.dropout_p)
|
| 249 |
+
else:
|
| 250 |
+
self.dropout = None
|
| 251 |
+
|
| 252 |
+
self.w_out = nn.Linear(config.dim_ff, config.hidden_size, bias=False)
|
| 253 |
+
|
| 254 |
+
def forward(self, hidden_states: torch.Tensor):
|
| 255 |
+
"""
|
| 256 |
+
Args:
|
| 257 |
+
hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)
|
| 258 |
+
"""
|
| 259 |
+
hidden_states = self.w_in(hidden_states)
|
| 260 |
+
|
| 261 |
+
if self.dropout is not None:
|
| 262 |
+
hidden_states = self.dropout(hidden_states)
|
| 263 |
+
|
| 264 |
+
hidden_states = self.w_out(hidden_states)
|
| 265 |
+
|
| 266 |
+
return hidden_states
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class CpmAntFFNBlock(nn.Module):
|
| 270 |
+
def __init__(self, config: CpmAntConfig):
|
| 271 |
+
super().__init__()
|
| 272 |
+
self.layernorm_before_ffn = CpmAntLayerNorm(config)
|
| 273 |
+
self.ffn = CpmAntFeedForward(config)
|
| 274 |
+
if config.dropout_p:
|
| 275 |
+
self.dropout = torch.nn.Dropout(config.dropout_p)
|
| 276 |
+
else:
|
| 277 |
+
self.dropout = None
|
| 278 |
+
|
| 279 |
+
def forward(
|
| 280 |
+
self,
|
| 281 |
+
hidden_states: torch.Tensor,
|
| 282 |
+
):
|
| 283 |
+
"""
|
| 284 |
+
Args:
|
| 285 |
+
hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):
|
| 286 |
+
Hidden states before feed forward layer.
|
| 287 |
+
"""
|
| 288 |
+
ln_outputs = self.layernorm_before_ffn(hidden_states)
|
| 289 |
+
outputs = self.ffn(ln_outputs)
|
| 290 |
+
if self.dropout is not None:
|
| 291 |
+
outputs = self.dropout(outputs)
|
| 292 |
+
hidden_states = hidden_states + outputs
|
| 293 |
+
return hidden_states
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class CpmAntTransformerBlock(nn.Module):
|
| 297 |
+
def __init__(self, config: CpmAntConfig):
|
| 298 |
+
super().__init__()
|
| 299 |
+
self.self_att = CpmAntSelfAttentionBlock(config)
|
| 300 |
+
self.ffn = CpmAntFFNBlock(config)
|
| 301 |
+
|
| 302 |
+
def forward(
|
| 303 |
+
self,
|
| 304 |
+
hidden_states: torch.Tensor,
|
| 305 |
+
attention_mask: torch.Tensor,
|
| 306 |
+
position_bias: Optional[torch.Tensor] = None,
|
| 307 |
+
output_attentions: Optional[bool] = False,
|
| 308 |
+
past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 309 |
+
use_cache: Optional[bool] = None,
|
| 310 |
+
):
|
| 311 |
+
"""
|
| 312 |
+
Args:
|
| 313 |
+
hidden_states (`torch.Tensor`):
|
| 314 |
+
Input to the layer of shape `(batch, seq_len, dim_model)`
|
| 315 |
+
attention_mask (`torch.Tensor`):
|
| 316 |
+
Mask of shape `(batch, seq_len, seq_len)` that prevents invalid positions from taking part in the attention computation.
|
| 317 |
+
position_bias (`torch.Tensor`):
|
| 318 |
+
Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)`
|
| 319 |
+
output_attentions (`bool`, *optional*):
|
| 320 |
+
Whether or not to return the attentions tensors of all attention layers.
|
| 321 |
+
past_key_values (`Tuple[torch.Tensor, torch.Tensor])`, *optional*):
|
| 322 |
+
Cached past key and value projection states
|
| 323 |
+
use_cache (`bool`, *optional*):
|
| 324 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 325 |
+
(see `past_key_values`).
|
| 326 |
+
"""
|
| 327 |
+
hidden_states = self.self_att(
|
| 328 |
+
hidden_states,
|
| 329 |
+
attention_mask=attention_mask,
|
| 330 |
+
position_bias=position_bias,
|
| 331 |
+
output_attentions=output_attentions,
|
| 332 |
+
past_key_values=past_key_values,
|
| 333 |
+
use_cache=use_cache,
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
hidden_states, attn_weights, current_key_value = hidden_states
|
| 337 |
+
|
| 338 |
+
hidden_states = self.ffn(hidden_states)
|
| 339 |
+
|
| 340 |
+
return hidden_states, attn_weights, current_key_value
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
class CpmAntEncoder(nn.Module):
|
| 344 |
+
def __init__(self, config: CpmAntConfig):
|
| 345 |
+
super().__init__()
|
| 346 |
+
self.num_layers = config.num_hidden_layers
|
| 347 |
+
self.layers = nn.ModuleList([CpmAntTransformerBlock(config) for ith in range(self.num_layers)])
|
| 348 |
+
|
| 349 |
+
self.output_layernorm = CpmAntLayerNorm(config)
|
| 350 |
+
|
| 351 |
+
def forward(
|
| 352 |
+
self,
|
| 353 |
+
hidden_states: torch.Tensor,
|
| 354 |
+
attention_mask: torch.Tensor,
|
| 355 |
+
position_bias: torch.Tensor,
|
| 356 |
+
output_attentions: Optional[bool] = None,
|
| 357 |
+
output_hidden_states: Optional[bool] = None,
|
| 358 |
+
past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 359 |
+
use_cache: Optional[bool] = None,
|
| 360 |
+
):
|
| 361 |
+
"""
|
| 362 |
+
Args:
|
| 363 |
+
hidden_states (`torch.Tensor`):
|
| 364 |
+
Input to the layer of shape `(batch, seq_len, dim_model)`
|
| 365 |
+
attention_mask (`torch.Tensor`):
|
| 366 |
+
Mask of shape `(batch, seq_len, seq_len)` that prevents invalid positions from taking part in the attention computation.
|
| 367 |
+
position_bias (`torch.Tensor`):
|
| 368 |
+
Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)`
|
| 369 |
+
output_attentions (`bool`, *optional*):
|
| 370 |
+
Whether or not to return the attentions tensors of all attention layers.
|
| 371 |
+
output_hidden_states (`bool`, *optional*):
|
| 372 |
+
Whether or not to return the hidden states of all layers.
|
| 373 |
+
past_key_values (`Tuple[torch.Tensor, torch.Tensor])`, *optional*):
|
| 374 |
+
Cached past key and value projection states
|
| 375 |
+
use_cache (`bool`, *optional*):
|
| 376 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 377 |
+
(see `past_key_values`).
|
| 378 |
+
"""
|
| 379 |
+
all_hidden_states = () if output_hidden_states else None
|
| 380 |
+
all_self_attns = () if output_attentions else None
|
| 381 |
+
current_key_values = () if use_cache else None
|
| 382 |
+
|
| 383 |
+
for i, layer in enumerate(self.layers):
|
| 384 |
+
if output_hidden_states:
|
| 385 |
+
all_hidden_states += (hidden_states,)
|
| 386 |
+
layer_outputs = layer(
|
| 387 |
+
hidden_states,
|
| 388 |
+
attention_mask,
|
| 389 |
+
position_bias,
|
| 390 |
+
output_attentions=output_attentions,
|
| 391 |
+
past_key_values=past_key_values[i] if past_key_values else None,
|
| 392 |
+
use_cache=use_cache,
|
| 393 |
+
)
|
| 394 |
+
hidden_states, attn_weights, current_key_value = layer_outputs
|
| 395 |
+
if output_attentions:
|
| 396 |
+
all_self_attns += (attn_weights,)
|
| 397 |
+
if current_key_value is not None:
|
| 398 |
+
current_key_values = current_key_values + (current_key_value,)
|
| 399 |
+
|
| 400 |
+
hidden_states = self.output_layernorm(hidden_states)
|
| 401 |
+
|
| 402 |
+
if output_hidden_states:
|
| 403 |
+
all_hidden_states += (hidden_states,)
|
| 404 |
+
|
| 405 |
+
return hidden_states, current_key_values, all_hidden_states, all_self_attns
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->CPMAnt
|
| 409 |
+
class CpmAntIntermediate(nn.Module):
|
| 410 |
+
def __init__(self, config):
|
| 411 |
+
super().__init__()
|
| 412 |
+
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
| 413 |
+
if isinstance(config.hidden_act, str):
|
| 414 |
+
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
| 415 |
+
else:
|
| 416 |
+
self.intermediate_act_fn = config.hidden_act
|
| 417 |
+
|
| 418 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 419 |
+
hidden_states = self.dense(hidden_states)
|
| 420 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
| 421 |
+
return hidden_states
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class CpmAntSegmentPositionEmbedding(nn.Module):
|
| 425 |
+
def __init__(self, config: CpmAntConfig):
|
| 426 |
+
super().__init__()
|
| 427 |
+
|
| 428 |
+
self.num_heads = config.num_attention_heads
|
| 429 |
+
self.num_buckets = config.position_bias_num_buckets
|
| 430 |
+
self.max_distance = config.position_bias_max_distance
|
| 431 |
+
self.num_segments = config.segment_types
|
| 432 |
+
|
| 433 |
+
self.relative_attention_bias = nn.Parameter(
|
| 434 |
+
torch.empty(
|
| 435 |
+
config.segment_types * config.segment_types + config.position_bias_num_buckets,
|
| 436 |
+
config.num_attention_heads,
|
| 437 |
+
)
|
| 438 |
+
)
|
| 439 |
+
|
| 440 |
+
def forward(
|
| 441 |
+
self,
|
| 442 |
+
key_pos: torch.Tensor,
|
| 443 |
+
query_pos: torch.Tensor,
|
| 444 |
+
key_segment: torch.Tensor,
|
| 445 |
+
query_segment: torch.Tensor,
|
| 446 |
+
):
|
| 447 |
+
with torch.no_grad():
|
| 448 |
+
batch = key_pos.size(0)
|
| 449 |
+
keylen = key_pos.size(1)
|
| 450 |
+
querylen = query_pos.size(1)
|
| 451 |
+
|
| 452 |
+
if key_pos.size(0) != query_pos.size(0):
|
| 453 |
+
raise AssertionError(
|
| 454 |
+
f"key_pos.size(0) should be equal to query_pos.size(0), but got {key_pos.size(0)} and {query_pos.size(0)}!"
|
| 455 |
+
)
|
| 456 |
+
if keylen != key_segment.size(1) or querylen != query_segment.size(1):
|
| 457 |
+
raise AssertionError(
|
| 458 |
+
f"keylen should be equal to key_segment.size(1), but got {keylen} and {key_segment.size(1)}!"
|
| 459 |
+
)
|
| 460 |
+
if querylen != query_segment.size(1):
|
| 461 |
+
raise AssertionError(
|
| 462 |
+
f"querylen should be equal to query_segment.size(1), but got {querylen} and {query_segment.size(1)}!"
|
| 463 |
+
)
|
| 464 |
+
|
| 465 |
+
key_pos = key_pos.view(batch, -1, keylen)
|
| 466 |
+
query_pos = query_pos.view(batch, querylen, -1)
|
| 467 |
+
key_segment = key_segment.view(batch, -1, keylen)
|
| 468 |
+
query_segment = query_segment.view(batch, querylen, -1)
|
| 469 |
+
|
| 470 |
+
relative_position_bucket = self._segment_relative_position_bucket(query_segment, key_segment)
|
| 471 |
+
relative_position_bucket = relative_position_bucket + self.num_buckets
|
| 472 |
+
|
| 473 |
+
# (batch, len_q, len_k)
|
| 474 |
+
absolute_position_bucket = self._position_bucket(
|
| 475 |
+
torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[None, :]
|
| 476 |
+
- torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[:, None],
|
| 477 |
+
num_buckets=self.num_buckets,
|
| 478 |
+
max_distance=self.max_distance,
|
| 479 |
+
)
|
| 480 |
+
relative_position_bucket = torch.where(
|
| 481 |
+
(key_segment == query_segment),
|
| 482 |
+
absolute_position_bucket[None, :, :],
|
| 483 |
+
relative_position_bucket,
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
# (batch, len_q, len_k, num_heads)
|
| 487 |
+
embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
|
| 488 |
+
# (batch, num_heads, len_q, len_k)
|
| 489 |
+
embeds = embeds.permute(0, 3, 1, 2).contiguous()
|
| 490 |
+
return embeds
|
| 491 |
+
|
| 492 |
+
def _segment_relative_position_bucket(self, query_segment, key_segment):
|
| 493 |
+
return query_segment * self.num_segments + key_segment
|
| 494 |
+
|
| 495 |
+
def _position_bucket(self, relative_position, num_buckets=32, max_distance=128):
|
| 496 |
+
relative_buckets = 0
|
| 497 |
+
# always bidirectional in CPMAnt
|
| 498 |
+
num_buckets //= 2
|
| 499 |
+
relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
|
| 500 |
+
relative_position = torch.abs(relative_position)
|
| 501 |
+
max_exact = num_buckets // 2
|
| 502 |
+
is_small = relative_position < max_exact
|
| 503 |
+
relative_position_if_large = max_exact + (
|
| 504 |
+
torch.log(relative_position.float() / max_exact)
|
| 505 |
+
/ math.log(max_distance / max_exact)
|
| 506 |
+
* (num_buckets - max_exact)
|
| 507 |
+
).to(torch.int32)
|
| 508 |
+
relative_position_if_large = torch.min(
|
| 509 |
+
relative_position_if_large,
|
| 510 |
+
torch.full_like(relative_position_if_large, num_buckets - 1),
|
| 511 |
+
)
|
| 512 |
+
relative_buckets += torch.where(is_small, relative_position.to(torch.int32), relative_position_if_large)
|
| 513 |
+
return relative_buckets
|
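`_position_bucket` above follows T5-style relative-position bucketing: half of the buckets encode the sign of the offset, small offsets each get their own bucket, and larger offsets are binned logarithmically up to `max_distance`. A self-contained sketch mirroring that logic (the helper name is mine):

```python
import math
import torch

def position_bucket(relative_position, num_buckets=32, max_distance=128):
    num_buckets //= 2                                   # bidirectional: half for each sign
    buckets = (relative_position > 0).to(torch.int32) * num_buckets
    relative_position = torch.abs(relative_position)
    max_exact = num_buckets // 2                        # small offsets map one-to-one
    is_small = relative_position < max_exact
    if_large = max_exact + (
        torch.log(relative_position.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.int32)                                   # log-spaced bins for large offsets
    if_large = torch.min(if_large, torch.full_like(if_large, num_buckets - 1))
    return buckets + torch.where(is_small, relative_position.to(torch.int32), if_large)

print(position_bucket(torch.tensor([-64, -8, -1, 0, 1, 8, 64])))
```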
| 514 |
+
|
| 515 |
+
|
| 516 |
+
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->CPMAnt
|
| 517 |
+
class CpmAntOutput(nn.Module):
|
| 518 |
+
def __init__(self, config):
|
| 519 |
+
super().__init__()
|
| 520 |
+
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
| 521 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 522 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 523 |
+
|
| 524 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
| 525 |
+
hidden_states = self.dense(hidden_states)
|
| 526 |
+
hidden_states = self.dropout(hidden_states)
|
| 527 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
| 528 |
+
return hidden_states
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
class CpmAntPreTrainedModel(PreTrainedModel):
|
| 532 |
+
"""
|
| 533 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 534 |
+
models.
|
| 535 |
+
"""
|
| 536 |
+
|
| 537 |
+
config_class = CpmAntConfig
|
| 538 |
+
base_model_prefix = "cpmant"
|
| 539 |
+
supports_gradient_checkpointing = True
|
| 540 |
+
|
| 541 |
+
def _init_weights(self, module):
|
| 542 |
+
"""Initialize the weights"""
|
| 543 |
+
if isinstance(module, nn.Linear):
|
| 544 |
+
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
|
| 545 |
+
if module.bias is not None:
|
| 546 |
+
module.bias.data.zero_()
|
| 547 |
+
elif isinstance(module, nn.Embedding):
|
| 548 |
+
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
|
| 549 |
+
if module.padding_idx is not None:
|
| 550 |
+
module.weight.data[module.padding_idx].zero_()
|
| 551 |
+
elif isinstance(module, nn.LayerNorm):
|
| 552 |
+
module.bias.data.zero_()
|
| 553 |
+
module.weight.data.fill_(1.0)
|
| 554 |
+
elif isinstance(module, CpmAntLayerNorm):
|
| 555 |
+
module.weight.data.fill_(1.0)
|
| 556 |
+
elif isinstance(module, CpmAntSegmentPositionEmbedding):
|
| 557 |
+
module.relative_attention_bias.data.normal_(mean=0.0, std=self.config.init_std)
|
| 558 |
+
|
| 559 |
+
def _set_gradient_checkpointing(self, module, value=False):
|
| 560 |
+
if isinstance(module, CpmAntEncoder):
|
| 561 |
+
module.gradient_checkpointing = value
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
CPMANT_START_DOCSTRING = r"""
|
| 565 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
|
| 566 |
+
it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
|
| 567 |
+
behavior.
|
| 568 |
+
|
| 569 |
+
Parameters:
|
| 570 |
+
config ([`~CpmAntConfig`]): Model configuration class with all the parameters of the model.
|
| 571 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 572 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 573 |
+
"""
|
| 574 |
+
|
| 575 |
+
CPMANT_INPUTS_DOCSTRING = r"""
|
| 576 |
+
Args:
|
| 577 |
+
input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`):
|
| 578 |
+
Indices of input sequence tokens in the vocabulary.
|
| 579 |
+
|
| 580 |
+
Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 581 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 582 |
+
|
| 583 |
+
[What are input IDs?](../glossary#input-ids)
|
| 584 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 585 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
| 586 |
+
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
| 587 |
+
use_cache (`bool`, *optional*):
|
| 588 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 589 |
+
`past_key_values`).
|
| 590 |
+
output_attentions (`bool`, *optional*):
|
| 591 |
+
Whether or not to return the attentions tensors of all attention layers.
|
| 592 |
+
output_hidden_states (`bool`, *optional*):
|
| 593 |
+
Whether or not to return the hidden states of all layers.
|
| 594 |
+
return_dict (`bool`, *optional*):
|
| 595 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 596 |
+
"""
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
@add_start_docstrings(
|
| 600 |
+
"The bare CPMAnt Model outputting raw hidden-states without any specific head on top.",
|
| 601 |
+
CPMANT_START_DOCSTRING,
|
| 602 |
+
)
|
| 603 |
+
class CpmAntModel(CpmAntPreTrainedModel):
|
| 604 |
+
def __init__(self, config: CpmAntConfig):
|
| 605 |
+
super().__init__(config)
|
| 606 |
+
self.encoder = CpmAntEncoder(config)
|
| 607 |
+
self.segment_embedding = nn.Embedding(config.segment_types, config.hidden_size)
|
| 608 |
+
self.input_embedding = nn.Embedding(
|
| 609 |
+
config.vocab_size + config.prompt_types * config.prompt_length, config.hidden_size
|
| 610 |
+
)
|
| 611 |
+
self.position_bias = CpmAntSegmentPositionEmbedding(config)
|
| 612 |
+
self.prompt_length = config.prompt_length
|
| 613 |
+
self.vocab_size = config.vocab_size
|
| 614 |
+
|
| 615 |
+
self.post_init()
|
| 616 |
+
|
| 617 |
+
def get_input_embeddings(self):
|
| 618 |
+
return self.input_embedding
|
| 619 |
+
|
| 620 |
+
def set_input_embeddings(self, embeddings, **kwargs):
|
| 621 |
+
self.input_embedding = embeddings
|
| 622 |
+
|
| 623 |
+
def _prepare_attention_mask(self, input_ids, span, context, length):
|
| 624 |
+
batch = input_ids.size(0)
|
| 625 |
+
seqlen = input_ids.size(1)
|
| 626 |
+
device = input_ids.device
|
| 627 |
+
directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(seqlen, device=device).view(-1, 1)
|
| 628 |
+
attention_mask = context[:, None, :] | (
|
| 629 |
+
context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
|
| 630 |
+
)
|
| 631 |
+
attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
|
| 632 |
+
# mask for left padding
|
| 633 |
+
mask_1d = (
|
| 634 |
+
torch.tensor(list(range(seqlen - self.prompt_length))[::-1], device=device)[None, :].repeat(batch, 1)
|
| 635 |
+
< length[:, None]
|
| 636 |
+
)
|
| 637 |
+
mask_1d = torch.cat((torch.ones(batch, self.prompt_length, device=device).bool(), mask_1d), dim=1)
|
| 638 |
+
attention_mask = mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
|
| 639 |
+
return attention_mask
|
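The heart of `_prepare_attention_mask` above is the `directional_mask_2d` comparison, which produces a lower-triangular (causal) mask: position *i* may attend to position *j* only when *j* <= *i*. A tiny sketch of just that piece:

```python
import torch

seqlen = 4
directional_mask_2d = torch.arange(seqlen) <= torch.arange(seqlen).view(-1, 1)
print(directional_mask_2d.int())
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]])
```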
| 640 |
+
|
| 641 |
+
@add_start_docstrings_to_model_forward(CPMANT_INPUTS_DOCSTRING)
|
| 642 |
+
@add_code_sample_docstrings(
|
| 643 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 644 |
+
output_type=BaseModelOutputWithPast,
|
| 645 |
+
config_class=_CONFIG_FOR_DOC,
|
| 646 |
+
)
|
| 647 |
+
def forward(
|
| 648 |
+
self,
|
| 649 |
+
input_ids: Optional[torch.Tensor] = None,
|
| 650 |
+
output_attentions: Optional[bool] = None,
|
| 651 |
+
output_hidden_states: Optional[bool] = None,
|
| 652 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
|
| 653 |
+
use_cache: Optional[bool] = None,
|
| 654 |
+
return_dict: Optional[bool] = None,
|
| 655 |
+
**kwargs,
|
| 656 |
+
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
|
| 657 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 658 |
+
output_hidden_states = (
|
| 659 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 660 |
+
)
|
| 661 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 662 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 663 |
+
|
| 664 |
+
# add prompts ahead
|
| 665 |
+
if input_ids.dtype != torch.int32:
|
| 666 |
+
input_ids = input_ids.to(torch.int32)
|
| 667 |
+
dtype, device = input_ids.dtype, input_ids.device
|
| 668 |
+
segment = torch.where(input_ids != 0, 2, 0).to(dtype=dtype, device=device)
|
| 669 |
+
length = (segment != 0).sum(-1).to(dtype=dtype, device=device)
|
| 670 |
+
input_ids = torch.cat(
|
| 671 |
+
(
|
| 672 |
+
torch.arange(
|
| 673 |
+
self.prompt_length * 2 + self.vocab_size,
|
| 674 |
+
self.prompt_length * 3 + self.vocab_size,
|
| 675 |
+
dtype=dtype,
|
| 676 |
+
device=device,
|
| 677 |
+
).repeat(input_ids.size(0), 1),
|
| 678 |
+
input_ids,
|
| 679 |
+
),
|
| 680 |
+
dim=1,
|
| 681 |
+
)
|
| 682 |
+
batch, seq_length = input_ids.size()
|
| 683 |
+
segment = torch.cat((torch.zeros(batch, self.prompt_length, dtype=dtype, device=device), segment), dim=1)
|
| 684 |
+
context = torch.full((batch, seq_length), 1, dtype=dtype, device=device)
|
| 685 |
+
position = torch.arange(seq_length, dtype=dtype, device=device).repeat(batch, 1)
|
| 686 |
+
span = torch.full((batch, seq_length), 0, dtype=dtype, device=device)
|
| 687 |
+
|
| 688 |
+
if past_key_values is None:
|
| 689 |
+
past_length = 0
|
| 690 |
+
past_key_values = tuple([None] * self.encoder.num_layers)
|
| 691 |
+
input_ids = input_ids.contiguous()
|
| 692 |
+
hidden_states = self.input_embedding(input_ids)
|
| 693 |
+
segment_states = self.segment_embedding(segment)
|
| 694 |
+
hidden_states = hidden_states + segment_states
|
| 695 |
+
else:
|
| 696 |
+
past_length = past_key_values[0][0].size(-2)
|
| 697 |
+
segment_states = self.segment_embedding(segment)
|
| 698 |
+
hidden_states = self.input_embedding(input_ids) + segment_states[:, -1:, :]
|
| 699 |
+
|
| 700 |
+
attention_mask = self._prepare_attention_mask(input_ids, span, context, length)
|
| 701 |
+
position_bias = self.position_bias(position, position, segment, segment)
|
| 702 |
+
|
| 703 |
+
attention_mask = attention_mask[:, past_length:, :]
|
| 704 |
+
position_bias = position_bias[:, :, past_length:, :]
|
| 705 |
+
hidden_states = hidden_states[:, past_length:, :]
|
| 706 |
+
|
| 707 |
+
hidden_states, present_key_values, all_hidden_states, all_attentions = self.encoder(
|
| 708 |
+
hidden_states,
|
| 709 |
+
attention_mask,
|
| 710 |
+
position_bias,
|
| 711 |
+
output_attentions,
|
| 712 |
+
output_hidden_states,
|
| 713 |
+
past_key_values,
|
| 714 |
+
use_cache,
|
| 715 |
+
)
|
| 716 |
+
|
| 717 |
+
if past_length == 0:
|
| 718 |
+
hidden_states = hidden_states[:, self.prompt_length :, :]
|
| 719 |
+
# drop the prompt
|
| 720 |
+
if all_attentions is not None:
|
| 721 |
+
new_attentions = ()
|
| 722 |
+
for attention in all_attentions:
|
| 723 |
+
new_attentions += (attention[:, :, self.prompt_length :, self.prompt_length :],)
|
| 724 |
+
all_attentions = new_attentions
|
| 725 |
+
if all_hidden_states is not None:
|
| 726 |
+
new_hidden_states = ()
|
| 727 |
+
for hidden_state in all_hidden_states:
|
| 728 |
+
new_hidden_states += (hidden_state[:, self.prompt_length :, :],)
|
| 729 |
+
all_hidden_states = new_hidden_states
|
| 730 |
+
|
| 731 |
+
if not return_dict:
|
| 732 |
+
return tuple(
|
| 733 |
+
v for v in [hidden_states, present_key_values, all_hidden_states, all_attentions] if v is not None
|
| 734 |
+
)
|
| 735 |
+
|
| 736 |
+
return BaseModelOutputWithPast(
|
| 737 |
+
last_hidden_state=hidden_states,
|
| 738 |
+
past_key_values=present_key_values,
|
| 739 |
+
hidden_states=all_hidden_states,
|
| 740 |
+
attentions=all_attentions,
|
| 741 |
+
)
|
| 742 |
+
|
| 743 |
+
|
| 744 |
+
@add_start_docstrings(
|
| 745 |
+
"""
|
| 746 |
+
The CPMAnt Model with a language modeling head on top (linear layer with weights tied to the input embeddings).
|
| 747 |
+
""",
|
| 748 |
+
CPMANT_START_DOCSTRING,
|
| 749 |
+
)
|
| 750 |
+
class CpmAntForCausalLM(CpmAntPreTrainedModel):
|
| 751 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 752 |
+
|
| 753 |
+
def __init__(self, config: CpmAntConfig):
|
| 754 |
+
super().__init__(config)
|
| 755 |
+
self.cpmant = CpmAntModel(config)
|
| 756 |
+
|
| 757 |
+
# lm_head.weight is tied to cpmant.input_embedding.weight
|
| 758 |
+
self.lm_head = nn.Linear(
|
| 759 |
+
config.hidden_size, config.vocab_size + config.prompt_types * config.prompt_length, bias=False
|
| 760 |
+
)
|
| 761 |
+
self.post_init()
|
| 762 |
+
|
| 763 |
+
@add_start_docstrings_to_model_forward(CPMANT_INPUTS_DOCSTRING)
|
| 764 |
+
@add_code_sample_docstrings(
|
| 765 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 766 |
+
output_type=CausalLMOutputWithPast,
|
| 767 |
+
config_class=_CONFIG_FOR_DOC,
|
| 768 |
+
)
|
| 769 |
+
def forward(
|
| 770 |
+
self,
|
| 771 |
+
input_ids: Optional[torch.Tensor] = None,
|
| 772 |
+
past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
|
| 773 |
+
use_cache: Optional[bool] = None,
|
| 774 |
+
output_attentions: Optional[bool] = None,
|
| 775 |
+
output_hidden_states: Optional[bool] = None,
|
| 776 |
+
labels: Optional[torch.Tensor] = None,
|
| 777 |
+
return_dict: Optional[bool] = None,
|
| 778 |
+
attention_mask: Optional[torch.Tensor] = None, # dummy parameter for text-generation pipeline
|
| 779 |
+
**kwargs,
|
| 780 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 781 |
+
r"""
|
| 782 |
+
Args:
|
| 783 |
+
input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`):
|
| 784 |
+
Indices of input sequence tokens in the vocabulary.
|
| 785 |
+
|
| 786 |
+
Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 787 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 788 |
+
|
| 789 |
+
[What are input IDs?](../glossary#input-ids)
|
| 790 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 791 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
|
| 792 |
+
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
| 793 |
+
use_cache (`bool`, *optional*):
|
| 794 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 795 |
+
(see `past_key_values`).
|
| 796 |
+
output_attentions (`bool`, *optional*):
|
| 797 |
+
Whether or not to return the attentions tensors of all attention layers.
|
| 798 |
+
output_hidden_states (`bool`, *optional*):
|
| 799 |
+
Whether or not to return the hidden states of all layers.
|
| 800 |
+
labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 801 |
+
Labels for computing the masked language modeling loss.
|
| 802 |
+
return_dict (`bool`, *optional*):
|
| 803 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 804 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 805 |
+
CPMAnt will process attention mask automatically, this parameter is a dummy parameter for
|
| 806 |
+
text-generation pipeline.
|
| 807 |
+
|
| 808 |
+
Example:
|
| 809 |
+
|
| 810 |
+
Text Generation with CpmAntForCausalLM.
|
| 811 |
+
```python
|
| 812 |
+
>>> from transformers import CPMAntTokenizer, CpmAntForCausalLM
|
| 813 |
+
|
| 814 |
+
>>> texts = "今天天气不错,"
|
| 815 |
+
>>> model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b")
|
| 816 |
+
>>> tokenizer = CPMAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
|
| 817 |
+
>>> input_ids = tokenizer(texts, return_tensors="pt")
|
| 818 |
+
>>> outputs = model.generate(**input_ids)
|
| 819 |
+
>>> output_texts = tokenizer.batch_decode(outputs)
|
| 820 |
+
>>> print(output_texts)
|
| 821 |
+
['今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的']
|
| 822 |
+
```
|
| 823 |
+
"""
|
| 824 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 825 |
+
|
| 826 |
+
model_output = self.cpmant(
|
| 827 |
+
input_ids, output_attentions, output_hidden_states, past_key_values, use_cache, return_dict
|
| 828 |
+
)
|
| 829 |
+
hidden_states = model_output.last_hidden_state if return_dict else model_output[0]
|
| 830 |
+
|
| 831 |
+
logits = self.lm_head(hidden_states)
|
| 832 |
+
|
| 833 |
+
loss = None
|
| 834 |
+
if labels is not None:
|
| 835 |
+
loss_func = CrossEntropyLoss()
|
| 836 |
+
loss = loss_func(logits.view(-1, logits.size(-1)), labels.view(-1))
|
| 837 |
+
|
| 838 |
+
if not return_dict:
|
| 839 |
+
output = (logits,) + model_output[1:]
|
| 840 |
+
return ((loss,) + output) if loss is not None else output
|
| 841 |
+
|
| 842 |
+
return CausalLMOutputWithPast(
|
| 843 |
+
loss=loss,
|
| 844 |
+
logits=logits,
|
| 845 |
+
past_key_values=model_output.past_key_values,
|
| 846 |
+
hidden_states=model_output.hidden_states,
|
| 847 |
+
attentions=model_output.attentions,
|
| 848 |
+
)
|
| 849 |
+
|
| 850 |
+
def get_input_embeddings(self):
|
| 851 |
+
return self.cpmant.input_embedding
|
| 852 |
+
|
| 853 |
+
def set_input_embeddings(self, embeddings):
|
| 854 |
+
self.cpmant.input_embedding = embeddings
|
| 855 |
+
|
| 856 |
+
def get_output_embeddings(self):
|
| 857 |
+
return self.lm_head
|
| 858 |
+
|
| 859 |
+
def set_output_embeddings(self, new_embeddings):
|
| 860 |
+
self.lm_head = new_embeddings
|
| 861 |
+
|
| 862 |
+
def prepare_inputs_for_generation(self, input_ids, **kwargs):
|
| 863 |
+
input_ids = input_ids.int()
|
| 864 |
+
# save the memory usage of dummy attention mask
|
| 865 |
+
if "attention_mask" in kwargs:
|
| 866 |
+
kwargs["attention_mask"] = torch.zeros(1, 1)
|
| 867 |
+
|
| 868 |
+
return {
|
| 869 |
+
"input_ids": input_ids,
|
| 870 |
+
"use_cache": kwargs["use_cache"],
|
| 871 |
+
"past_key_values": kwargs.get("past_key_values", None),
|
| 872 |
+
}
|
| 873 |
+
|
| 874 |
+
def _reorder_cache(self, past_key_values, beam_idx):
|
| 875 |
+
past_key_values = [list(each) if each is not None else each for each in past_key_values]
|
| 876 |
+
for key_value_layer in past_key_values:
|
| 877 |
+
key_value_layer[0] = key_value_layer[0][beam_idx]
|
| 878 |
+
key_value_layer[1] = key_value_layer[1][beam_idx]
|
| 879 |
+
return past_key_values
|
evalkit_tf433/lib/python3.10/site-packages/transformers/models/cpmant/tokenization_cpmant.py
ADDED
|
@@ -0,0 +1,277 @@
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Tokenization classes for CPMAnt."""
|
| 16 |
+
import collections
|
| 17 |
+
import os
|
| 18 |
+
from typing import List, Optional, Tuple
|
| 19 |
+
|
| 20 |
+
from transformers.utils import is_jieba_available, requires_backends
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
if is_jieba_available():
|
| 24 |
+
import jieba
|
| 25 |
+
|
| 26 |
+
from ...tokenization_utils import PreTrainedTokenizer
|
| 27 |
+
from ...utils import logging
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
logger = logging.get_logger(__name__)
|
| 31 |
+
|
| 32 |
+
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
|
| 33 |
+
|
| 34 |
+
PRETRAINED_VOCAB_FILES_MAP = {
|
| 35 |
+
"vocab_file": {
|
| 36 |
+
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
|
| 37 |
+
},
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
|
| 41 |
+
"openbmb/cpm-ant-10b": 1024,
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def load_vocab(vocab_file):
|
| 46 |
+
"""Loads a vocabulary file into a dictionary."""
|
| 47 |
+
vocab = collections.OrderedDict()
|
| 48 |
+
with open(vocab_file, "r", encoding="utf-8") as reader:
|
| 49 |
+
tokens = reader.readlines()
|
| 50 |
+
for index, token in enumerate(tokens):
|
| 51 |
+
token = token.rstrip("\n")
|
| 52 |
+
vocab[token] = index
|
| 53 |
+
return vocab
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class WordpieceTokenizer(object):
|
| 57 |
+
def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
|
| 58 |
+
self.vocab = vocab
|
| 59 |
+
self.unk_token = unk_token
|
| 60 |
+
self.max_input_chars_per_word = max_input_chars_per_word
|
| 61 |
+
|
| 62 |
+
def tokenize(self, token):
|
| 63 |
+
chars = list(token)
|
| 64 |
+
if len(chars) > self.max_input_chars_per_word:
|
| 65 |
+
return [self.unk_token]
|
| 66 |
+
|
| 67 |
+
start = 0
|
| 68 |
+
sub_tokens = []
|
| 69 |
+
while start < len(chars):
|
| 70 |
+
end = len(chars)
|
| 71 |
+
cur_substr = None
|
| 72 |
+
while start < end:
|
| 73 |
+
substr = "".join(chars[start:end])
|
| 74 |
+
if substr in self.vocab:
|
| 75 |
+
cur_substr = substr
|
| 76 |
+
break
|
| 77 |
+
end -= 1
|
| 78 |
+
if cur_substr is None:
|
| 79 |
+
sub_tokens.append(self.unk_token)
|
| 80 |
+
start += 1
|
| 81 |
+
else:
|
| 82 |
+
sub_tokens.append(cur_substr)
|
| 83 |
+
start = end
|
| 84 |
+
|
| 85 |
+
return sub_tokens
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class CpmAntTokenizer(PreTrainedTokenizer):
|
| 89 |
+
"""
|
| 90 |
+
Construct a CPMAnt tokenizer. Based on byte-level Byte-Pair-Encoding.
|
| 91 |
+
|
| 92 |
+
Args:
|
| 93 |
+
vocab_file (`str`):
|
| 94 |
+
Path to the vocabulary file.
|
| 95 |
+
bod_token (`str`, *optional*, defaults to `"<d>"`):
|
| 96 |
+
The beginning of document token.
|
| 97 |
+
eod_token (`str`, *optional*, defaults to `"</d>"`):
|
| 98 |
+
The end of document token.
|
| 99 |
+
bos_token (`str`, *optional*, defaults to `"<s>"`):
|
| 100 |
+
The beginning of sequence token.
|
| 101 |
+
eos_token (`str`, *optional*, defaults to `"</s>"`):
|
| 102 |
+
The end of sequence token.
|
| 103 |
+
pad_token (`str`, *optional*, defaults to `"<pad>"`):
|
| 104 |
+
The token used for padding.
|
| 105 |
+
unk_token (`str`, *optional*, defaults to `"<unk>"`):
|
| 106 |
+
The unknown token.
|
| 107 |
+
line_token (`str`, *optional*, defaults to `"</n>"`):
|
| 108 |
+
The line token.
|
| 109 |
+
space_token (`str`, *optional*, defaults to `"</_>"`):
|
| 110 |
+
The space token.
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
| 114 |
+
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
|
| 115 |
+
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
|
| 116 |
+
model_input_names = ["input_ids", "attention_mask"]
|
| 117 |
+
add_prefix_space = False
|
| 118 |
+
|
| 119 |
+
def __init__(
|
| 120 |
+
self,
|
| 121 |
+
vocab_file,
|
| 122 |
+
bod_token="<d>",
|
| 123 |
+
eod_token="</d>",
|
| 124 |
+
bos_token="<s>",
|
| 125 |
+
eos_token="</s>",
|
| 126 |
+
pad_token="<pad>",
|
| 127 |
+
unk_token="<unk>",
|
| 128 |
+
line_token="</n>",
|
| 129 |
+
space_token="</_>",
|
| 130 |
+
padding_side="left",
|
| 131 |
+
**kwargs,
|
| 132 |
+
):
|
| 133 |
+
requires_backends(self, ["jieba"])
|
| 134 |
+
super().__init__(
|
| 135 |
+
bod_token=bod_token,
|
| 136 |
+
eod_token=eod_token,
|
| 137 |
+
bos_token=bos_token,
|
| 138 |
+
eos_token=eos_token,
|
| 139 |
+
pad_token=pad_token,
|
| 140 |
+
unk_token=unk_token,
|
| 141 |
+
line_token=line_token,
|
| 142 |
+
space_token=space_token,
|
| 143 |
+
padding_side=padding_side,
|
| 144 |
+
**kwargs,
|
| 145 |
+
)
|
| 146 |
+
self.bod_token = bod_token
|
| 147 |
+
self.eod_token = eod_token
|
| 148 |
+
self.encoder = load_vocab(vocab_file)
|
| 149 |
+
self.encoder[" "] = self.encoder[space_token]
|
| 150 |
+
self.encoder["\n"] = self.encoder[line_token]
|
| 151 |
+
|
| 152 |
+
del self.encoder[space_token]
|
| 153 |
+
del self.encoder[line_token]
|
| 154 |
+
|
| 155 |
+
self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
|
| 156 |
+
self.decoder = {v: k for k, v in self.encoder.items()}
|
| 157 |
+
|
| 158 |
+
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
|
| 159 |
+
|
| 160 |
+
@property
|
| 161 |
+
def bod_token_id(self):
|
| 162 |
+
return self.encoder[self.bod_token]
|
| 163 |
+
|
| 164 |
+
@property
|
| 165 |
+
def eod_token_id(self):
|
| 166 |
+
return self.encoder[self.eod_token]
|
| 167 |
+
|
| 168 |
+
@property
|
| 169 |
+
def newline_id(self):
|
| 170 |
+
return self.encoder["\n"]
|
| 171 |
+
|
| 172 |
+
@property
|
| 173 |
+
def vocab_size(self) -> int:
|
| 174 |
+
return len(self.encoder)
|
| 175 |
+
|
| 176 |
+
def get_vocab(self):
|
| 177 |
+
return dict(self.encoder, **self.added_tokens_encoder)
|
| 178 |
+
|
| 179 |
+
def _tokenize(self, text):
|
| 180 |
+
"""Tokenize a string."""
|
| 181 |
+
output_tokens = []
|
| 182 |
+
for x in jieba.cut(text, cut_all=False):
|
| 183 |
+
output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
|
| 184 |
+
return output_tokens
|
| 185 |
+
|
| 186 |
+
def _decode(self, token_ids, **kwargs):
|
| 187 |
+
"""Decode ids into a string."""
|
| 188 |
+
token_ids = [i for i in token_ids if i >= 0]
|
| 189 |
+
token_ids = [
|
| 190 |
+
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
|
| 191 |
+
]
|
| 192 |
+
return super()._decode(token_ids, **kwargs)
|
| 193 |
+
|
| 194 |
+
def check(self, token):
|
| 195 |
+
return token in self.encoder
|
| 196 |
+
|
| 197 |
+
def convert_tokens_to_string(self, tokens: List[str]) -> str:
|
| 198 |
+
return "".join(tokens)
|
| 199 |
+
|
| 200 |
+
def _convert_token_to_id(self, token):
|
| 201 |
+
"""Converts a token (str) in an id using the vocab."""
|
| 202 |
+
return self.encoder.get(token, self.encoder.get(self.unk_token))
|
| 203 |
+
|
| 204 |
+
def _convert_id_to_token(self, index):
|
| 205 |
+
"""Converts an index (integer) in a token (str) using the vocab."""
|
| 206 |
+
return self.decoder.get(index, self.unk_token)
|
| 207 |
+
|
| 208 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
| 209 |
+
if os.path.isdir(save_directory):
|
| 210 |
+
vocab_file = os.path.join(
|
| 211 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
| 212 |
+
)
|
| 213 |
+
else:
|
| 214 |
+
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
|
| 215 |
+
index = 0
|
| 216 |
+
if " " in self.encoder:
|
| 217 |
+
self.encoder["</_>"] = self.encoder[" "]
|
| 218 |
+
del self.encoder[" "]
|
| 219 |
+
if "\n" in self.encoder:
|
| 220 |
+
self.encoder["</n>"] = self.encoder["\n"]
|
| 221 |
+
del self.encoder["\n"]
|
| 222 |
+
self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
|
| 223 |
+
with open(vocab_file, "w", encoding="utf-8") as writer:
|
| 224 |
+
for token, token_index in self.encoder.items():
|
| 225 |
+
if index != token_index:
|
| 226 |
+
logger.warning(
|
| 227 |
+
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
|
| 228 |
+
" Please check that the vocabulary is not corrupted!"
|
| 229 |
+
)
|
| 230 |
+
index = token_index
|
| 231 |
+
writer.write(token + "\n")
|
| 232 |
+
index += 1
|
| 233 |
+
return (vocab_file,)
|
| 234 |
+
|
| 235 |
+
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
|
| 236 |
+
"""
|
| 237 |
+
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
| 238 |
+
adding special tokens. A CPMAnt sequence has the following format:
|
| 239 |
+
|
| 240 |
+
- single sequence: `[BOS] Sequence`.
|
| 241 |
+
|
| 242 |
+
Args:
|
| 243 |
+
token_ids_0 (`List[int]`): The first tokenized sequence that special tokens will be added.
|
| 244 |
+
token_ids_1 (`List[int]`): The optional second tokenized sequence that special tokens will be added.
|
| 245 |
+
|
| 246 |
+
Returns:
|
| 247 |
+
`List[int]`: The model input with special tokens.
|
| 248 |
+
"""
|
| 249 |
+
if token_ids_1 is None:
|
| 250 |
+
return [self.bos_token_id] + token_ids_0
|
| 251 |
+
return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
|
| 252 |
+
|
| 253 |
+
def get_special_tokens_mask(
|
| 254 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
|
| 255 |
+
) -> List[int]:
|
| 256 |
+
"""
|
| 257 |
+
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
|
| 258 |
+
special tokens using the tokenizer `prepare_for_model` method.
|
| 259 |
+
|
| 260 |
+
Args:
|
| 261 |
+
token_ids_0 (`List[int]`): List of IDs.
|
| 262 |
+
token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.
|
| 263 |
+
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
|
| 264 |
+
Whether or not the token list is already formatted with special tokens for the model.
|
| 265 |
+
|
| 266 |
+
Returns:
|
| 267 |
+
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
| 268 |
+
"""
|
| 269 |
+
|
| 270 |
+
if already_has_special_tokens:
|
| 271 |
+
return super().get_special_tokens_mask(
|
| 272 |
+
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
if token_ids_1 is not None:
|
| 276 |
+
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
|
| 277 |
+
return [1] + ([0] * len(token_ids_0))
|
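A toy sketch of the greedy longest-match behaviour of the `WordpieceTokenizer` defined above. The vocabulary is a made-up assumption; the real tokenizer loads `vocab.txt` and first segments the text with jieba before word-piecing each segment.

```python
vocab = {"今天": 0, "天气": 1, "不": 2, "错": 3, "<unk>": 4}
wp = WordpieceTokenizer(vocab=vocab, unk_token="<unk>")

print(wp.tokenize("今天天气不错"))  # ['今天', '天气', '不', '错']
print(wp.tokenize("明天"))  # ['<unk>', '<unk>'] (out-of-vocabulary characters fall back one by one)
```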
evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (189 Bytes)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc
ADDED
Binary file (4.67 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc
ADDED
Binary file (7.46 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc
ADDED
Binary file (17.6 kB)

evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py
ADDED
@@ -0,0 +1,111 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RetriBERT model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a
    RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the RetriBERT
    [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`RetriBertModel`]
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the *token_type_ids* passed into [`BertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        share_encoders (`bool`, *optional*, defaults to `True`):
            Whether or not to use the same Bert-type encoder for the queries and document
        projection_dim (`int`, *optional*, defaults to 128):
            Final dimension of the query and document representation after projection
    """

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
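A brief instantiation sketch for the configuration above, assuming `RetriBertConfig` and `RetriBertModel` are exposed at the top level of this `transformers` build.

```python
from transformers import RetriBertConfig, RetriBertModel

# Defaults: a shared query/document encoder and a 128-dimensional projection head.
config = RetriBertConfig()
print(config.share_encoders, config.projection_dim)  # True 128

# A smaller, non-shared variant with randomly initialised weights.
small_config = RetriBertConfig(num_hidden_layers=4, share_encoders=False, projection_dim=64)
model = RetriBertModel(small_config)
```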
evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py
ADDED
@@ -0,0 +1,220 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RetriBERT model
"""


import math
from typing import Optional

import torch
import torch.utils.checkpoint as checkpoint
from torch import nn

from ....modeling_utils import PreTrainedModel
from ....utils import add_start_docstrings, logging
from ...bert.modeling_bert import BertModel
from .configuration_retribert import RetriBertConfig


logger = logging.get_logger(__name__)

RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "yjernite/retribert-base-uncased",
    # See all RetriBert models at https://huggingface.co/models?filter=retribert
]


# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class RetriBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RetriBertConfig
    load_tf_weights = None
    base_model_prefix = "retribert"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


RETRIBERT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    """Bert Based model to embed queries or document for document retrieval.""",
    RETRIBERT_START_DOCSTRING,
)
class RetriBertModel(RetriBertPreTrainedModel):
    def __init__(self, config: RetriBertConfig) -> None:
        super().__init__(config)
        self.projection_dim = config.projection_dim

        self.bert_query = BertModel(config)
        self.bert_doc = None if config.share_encoders else BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
        self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        self.ce_loss = nn.CrossEntropyLoss(reduction="mean")

        # Initialize weights and apply final processing
        self.post_init()

    def embed_sentences_checkpointed(
        self,
        input_ids,
        attention_mask,
        sent_encoder,
        checkpoint_batch_size=-1,
    ):
        # reproduces BERT forward pass with checkpointing
        if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
            return sent_encoder(input_ids, attention_mask=attention_mask)[1]
        else:
            # prepare implicit variables
            device = input_ids.device
            input_shape = input_ids.size()
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
            head_mask = [None] * sent_encoder.config.num_hidden_layers
            extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(
                attention_mask, input_shape
            )

            # define function for checkpointing
            def partial_encode(*inputs):
                encoder_outputs = sent_encoder.encoder(
                    inputs[0],
                    attention_mask=inputs[1],
                    head_mask=head_mask,
                )
                sequence_output = encoder_outputs[0]
                pooled_output = sent_encoder.pooler(sequence_output)
                return pooled_output

            # run embedding layer on everything at once
            embedding_output = sent_encoder.embeddings(
                input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
            )
            # run encoding and pooling on one mini-batch at a time
            pooled_output_list = []
            for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
                b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
                b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
                pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
                pooled_output_list.append(pooled_output)
            return torch.cat(pooled_output_list, dim=0)

    def embed_questions(
        self,
        input_ids,
        attention_mask=None,
        checkpoint_batch_size=-1,
    ):
        q_reps = self.embed_sentences_checkpointed(
            input_ids,
            attention_mask,
            self.bert_query,
            checkpoint_batch_size,
        )
        return self.project_query(q_reps)

    def embed_answers(
        self,
        input_ids,
        attention_mask=None,
        checkpoint_batch_size=-1,
    ):
        a_reps = self.embed_sentences_checkpointed(
            input_ids,
            attention_mask,
            self.bert_query if self.bert_doc is None else self.bert_doc,
            checkpoint_batch_size,
        )
        return self.project_doc(a_reps)

    def forward(
        self,
        input_ids_query: torch.LongTensor,
        attention_mask_query: Optional[torch.FloatTensor],
        input_ids_doc: torch.LongTensor,
        attention_mask_doc: Optional[torch.FloatTensor],
        checkpoint_batch_size: int = -1,
    ) -> torch.FloatTensor:
        r"""
        Args:
            input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary for the queries in a batch.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary for the documents in a batch.
            attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on documents padding token indices.
            checkpoint_batch_size (`int`, *optional*, defaults to `-1`):
                If greater than 0, uses gradient checkpointing to only compute sequence representation on
                `checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to
                all document representations in the batch.

        Return:
            `torch.FloatTensor`: The bidirectional cross-entropy loss obtained while trying to match each query to its
            corresponding document and each document to its corresponding query in the batch
        """
        device = input_ids_query.device
        q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
        a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
        compare_scores = torch.mm(q_reps, a_reps.t())
        loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
        loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
        loss = (loss_qa + loss_aq) / 2
        return loss
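A small sketch of the forward contract above: each query in the batch is scored against every document, and the query-to-document and document-to-query cross-entropies are averaged into one scalar. The tiny configuration and random ids below are illustrative assumptions, not a trained setup.

```python
import torch
from transformers import RetriBertConfig, RetriBertModel

config = RetriBertConfig(
    num_hidden_layers=2, num_attention_heads=2, hidden_size=128, intermediate_size=256, projection_dim=32
)
model = RetriBertModel(config)

batch_size, seq_len = 4, 16
input_ids_query = torch.randint(0, config.vocab_size, (batch_size, seq_len))
input_ids_doc = torch.randint(0, config.vocab_size, (batch_size, seq_len))
attention_mask = torch.ones(batch_size, seq_len)

# Row i of the score matrix compares query i with every document in the batch.
loss = model(input_ids_query, attention_mask, input_ids_doc, attention_mask)
print(loss)  # scalar tensor
```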
evalkit_tf433/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py
ADDED
|
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Tokenization classes for RetriBERT."""
|
| 16 |
+
|
| 17 |
+
import collections
|
| 18 |
+
import os
|
| 19 |
+
import unicodedata
|
| 20 |
+
from typing import List, Optional, Tuple
|
| 21 |
+
|
| 22 |
+
from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
|
| 23 |
+
from ....utils import logging
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
logger = logging.get_logger(__name__)
|
| 27 |
+
|
| 28 |
+
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
|
| 29 |
+
|
| 30 |
+
PRETRAINED_VOCAB_FILES_MAP = {
|
| 31 |
+
"vocab_file": {
|
| 32 |
+
"yjernite/retribert-base-uncased": (
|
| 33 |
+
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
|
| 34 |
+
),
|
| 35 |
+
}
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
|
| 39 |
+
"yjernite/retribert-base-uncased": 512,
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
PRETRAINED_INIT_CONFIGURATION = {
|
| 44 |
+
"yjernite/retribert-base-uncased": {"do_lower_case": True},
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# Copied from transformers.models.bert.tokenization_bert.load_vocab
|
| 49 |
+
def load_vocab(vocab_file):
|
| 50 |
+
"""Loads a vocabulary file into a dictionary."""
|
| 51 |
+
vocab = collections.OrderedDict()
|
| 52 |
+
with open(vocab_file, "r", encoding="utf-8") as reader:
|
| 53 |
+
tokens = reader.readlines()
|
| 54 |
+
for index, token in enumerate(tokens):
|
| 55 |
+
token = token.rstrip("\n")
|
| 56 |
+
vocab[token] = index
|
| 57 |
+
return vocab
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
|
| 61 |
+
def whitespace_tokenize(text):
|
| 62 |
+
"""Runs basic whitespace cleaning and splitting on a piece of text."""
|
| 63 |
+
text = text.strip()
|
| 64 |
+
if not text:
|
| 65 |
+
return []
|
| 66 |
+
tokens = text.split()
|
| 67 |
+
return tokens
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class RetriBertTokenizer(PreTrainedTokenizer):
|
| 71 |
+
r"""
|
| 72 |
+
Constructs a RetriBERT tokenizer.
|
| 73 |
+
|
| 74 |
+
[`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting
|
| 75 |
+
and wordpiece.
|
| 76 |
+
|
| 77 |
+
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
|
| 78 |
+
to: this superclass for more information regarding those methods.
|
| 79 |
+
|
| 80 |
+
Args:
|
| 81 |
+
vocab_file (`str`):
|
| 82 |
+
File containing the vocabulary.
|
| 83 |
+
do_lower_case (`bool`, *optional*, defaults to `True`):
|
| 84 |
+
Whether or not to lowercase the input when tokenizing.
|
| 85 |
+
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
|
| 86 |
+
Whether or not to do basic tokenization before WordPiece.
|
| 87 |
+
never_split (`Iterable`, *optional*):
|
| 88 |
+
Collection of tokens which will never be split during tokenization. Only has an effect when
|
| 89 |
+
`do_basic_tokenize=True`
|
| 90 |
+
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
|
| 91 |
+
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
| 92 |
+
token instead.
|
| 93 |
+
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
|
| 94 |
+
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
|
| 95 |
+
sequence classification or for a text and a question for question answering. It is also used as the last
|
| 96 |
+
token of a sequence built with special tokens.
|
| 97 |
+
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
|
| 98 |
+
The token used for padding, for example when batching sequences of different lengths.
|
| 99 |
+
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
|
| 100 |
+
The classifier token which is used when doing sequence classification (classification of the whole sequence
|
| 101 |
+
instead of per-token classification). It is the first token of the sequence when built with special tokens.
|
| 102 |
+
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
|
| 103 |
+
The token used for masking values. This is the token used when training this model with masked language
|
| 104 |
+
modeling. This is the token which the model will try to predict.
|
| 105 |
+
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
|
| 106 |
+
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
|
| 107 |
+
[issue](https://github.com/huggingface/transformers/issues/328)).
|
| 108 |
+
strip_accents (`bool`, *optional*):
|
| 109 |
+
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
|
| 110 |
+
value for `lowercase` (as in the original BERT).
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
| 114 |
+
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
|
| 115 |
+
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
|
| 116 |
+
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
|
| 117 |
+
model_input_names = ["input_ids", "attention_mask"]
|
| 118 |
+
|
| 119 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.__init__
|
| 120 |
+
def __init__(
|
| 121 |
+
self,
|
| 122 |
+
vocab_file,
|
| 123 |
+
do_lower_case=True,
|
| 124 |
+
do_basic_tokenize=True,
|
| 125 |
+
never_split=None,
|
| 126 |
+
unk_token="[UNK]",
|
| 127 |
+
sep_token="[SEP]",
|
| 128 |
+
pad_token="[PAD]",
|
| 129 |
+
cls_token="[CLS]",
|
| 130 |
+
mask_token="[MASK]",
|
| 131 |
+
tokenize_chinese_chars=True,
|
| 132 |
+
strip_accents=None,
|
| 133 |
+
**kwargs,
|
| 134 |
+
):
|
| 135 |
+
super().__init__(
|
| 136 |
+
do_lower_case=do_lower_case,
|
| 137 |
+
do_basic_tokenize=do_basic_tokenize,
|
| 138 |
+
never_split=never_split,
|
| 139 |
+
unk_token=unk_token,
|
| 140 |
+
sep_token=sep_token,
|
| 141 |
+
pad_token=pad_token,
|
| 142 |
+
cls_token=cls_token,
|
| 143 |
+
mask_token=mask_token,
|
| 144 |
+
tokenize_chinese_chars=tokenize_chinese_chars,
|
| 145 |
+
strip_accents=strip_accents,
|
| 146 |
+
**kwargs,
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
if not os.path.isfile(vocab_file):
|
| 150 |
+
raise ValueError(
|
| 151 |
+
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
|
| 152 |
+
" model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
|
| 153 |
+
)
|
| 154 |
+
self.vocab = load_vocab(vocab_file)
|
| 155 |
+
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
|
| 156 |
+
self.do_basic_tokenize = do_basic_tokenize
|
| 157 |
+
if do_basic_tokenize:
|
| 158 |
+
self.basic_tokenizer = BasicTokenizer(
|
| 159 |
+
do_lower_case=do_lower_case,
|
| 160 |
+
never_split=never_split,
|
| 161 |
+
tokenize_chinese_chars=tokenize_chinese_chars,
|
| 162 |
+
strip_accents=strip_accents,
|
| 163 |
+
)
|
| 164 |
+
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
|
| 165 |
+
|
| 166 |
+
@property
|
| 167 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
|
| 168 |
+
def do_lower_case(self):
|
| 169 |
+
return self.basic_tokenizer.do_lower_case
|
| 170 |
+
|
| 171 |
+
@property
|
| 172 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
|
| 173 |
+
def vocab_size(self):
|
| 174 |
+
return len(self.vocab)
|
| 175 |
+
|
| 176 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
|
| 177 |
+
def get_vocab(self):
|
| 178 |
+
return dict(self.vocab, **self.added_tokens_encoder)
|
| 179 |
+
|
| 180 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
|
| 181 |
+
def _tokenize(self, text, split_special_tokens=False):
|
| 182 |
+
split_tokens = []
|
| 183 |
+
if self.do_basic_tokenize:
|
| 184 |
+
for token in self.basic_tokenizer.tokenize(
|
| 185 |
+
text, never_split=self.all_special_tokens if not split_special_tokens else None
|
| 186 |
+
):
|
| 187 |
+
# If the token is part of the never_split set
|
| 188 |
+
if token in self.basic_tokenizer.never_split:
|
| 189 |
+
split_tokens.append(token)
|
| 190 |
+
else:
|
| 191 |
+
split_tokens += self.wordpiece_tokenizer.tokenize(token)
|
| 192 |
+
else:
|
| 193 |
+
split_tokens = self.wordpiece_tokenizer.tokenize(text)
|
| 194 |
+
return split_tokens
|
| 195 |
+
|
| 196 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
|
| 197 |
+
def _convert_token_to_id(self, token):
|
| 198 |
+
"""Converts a token (str) in an id using the vocab."""
|
| 199 |
+
return self.vocab.get(token, self.vocab.get(self.unk_token))
|
| 200 |
+
|
| 201 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
|
| 202 |
+
def _convert_id_to_token(self, index):
|
| 203 |
+
"""Converts an index (integer) in a token (str) using the vocab."""
|
| 204 |
+
return self.ids_to_tokens.get(index, self.unk_token)
|
| 205 |
+
|
| 206 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
|
| 207 |
+
def convert_tokens_to_string(self, tokens):
|
| 208 |
+
"""Converts a sequence of tokens (string) in a single string."""
|
| 209 |
+
out_string = " ".join(tokens).replace(" ##", "").strip()
|
| 210 |
+
return out_string
|
| 211 |
+
|
| 212 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
|
| 213 |
+
def build_inputs_with_special_tokens(
|
| 214 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
| 215 |
+
) -> List[int]:
|
| 216 |
+
"""
|
| 217 |
+
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
| 218 |
+
adding special tokens. A BERT sequence has the following format:
|
| 219 |
+
|
| 220 |
+
- single sequence: `[CLS] X [SEP]`
|
| 221 |
+
- pair of sequences: `[CLS] A [SEP] B [SEP]`
|
| 222 |
+
|
| 223 |
+
Args:
|
| 224 |
+
token_ids_0 (`List[int]`):
|
| 225 |
+
List of IDs to which the special tokens will be added.
|
| 226 |
+
token_ids_1 (`List[int]`, *optional*):
|
| 227 |
+
Optional second list of IDs for sequence pairs.
|
| 228 |
+
|
| 229 |
+
Returns:
|
| 230 |
+
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
| 231 |
+
"""
|
| 232 |
+
if token_ids_1 is None:
|
| 233 |
+
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
|
| 234 |
+
cls = [self.cls_token_id]
|
| 235 |
+
sep = [self.sep_token_id]
|
| 236 |
+
return cls + token_ids_0 + sep + token_ids_1 + sep
|
| 237 |
+
|
| 238 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
|
| 239 |
+
def get_special_tokens_mask(
|
| 240 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
|
| 241 |
+
) -> List[int]:
|
| 242 |
+
"""
|
| 243 |
+
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
|
| 244 |
+
special tokens using the tokenizer `prepare_for_model` method.
|
| 245 |
+
|
| 246 |
+
Args:
|
| 247 |
+
token_ids_0 (`List[int]`):
|
| 248 |
+
List of IDs.
|
| 249 |
+
token_ids_1 (`List[int]`, *optional*):
|
| 250 |
+
Optional second list of IDs for sequence pairs.
|
| 251 |
+
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
|
| 252 |
+
Whether or not the token list is already formatted with special tokens for the model.
|
| 253 |
+
|
| 254 |
+
Returns:
|
| 255 |
+
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
| 256 |
+
"""
|
| 257 |
+
|
| 258 |
+
if already_has_special_tokens:
|
| 259 |
+
return super().get_special_tokens_mask(
|
| 260 |
+
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
|
| 261 |
+
)
|
| 262 |
+
|
| 263 |
+
if token_ids_1 is not None:
|
| 264 |
+
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
|
| 265 |
+
return [1] + ([0] * len(token_ids_0)) + [1]
|
| 266 |
+
|
| 267 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
|
| 268 |
+
def create_token_type_ids_from_sequences(
|
| 269 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
| 270 |
+
) -> List[int]:
|
| 271 |
+
"""
|
| 272 |
+
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
|
| 273 |
+
pair mask has the following format:
|
| 274 |
+
|
| 275 |
+
```
|
| 276 |
+
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
|
| 277 |
+
| first sequence | second sequence |
|
| 278 |
+
```
|
| 279 |
+
|
| 280 |
+
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
|
| 281 |
+
|
| 282 |
+
Args:
|
| 283 |
+
token_ids_0 (`List[int]`):
|
| 284 |
+
List of IDs.
|
| 285 |
+
token_ids_1 (`List[int]`, *optional*):
|
| 286 |
+
Optional second list of IDs for sequence pairs.
|
| 287 |
+
|
| 288 |
+
Returns:
|
| 289 |
+
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
|
| 290 |
+
"""
|
| 291 |
+
sep = [self.sep_token_id]
|
| 292 |
+
cls = [self.cls_token_id]
|
| 293 |
+
if token_ids_1 is None:
|
| 294 |
+
return len(cls + token_ids_0 + sep) * [0]
|
| 295 |
+
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
|
| 296 |
+
|
| 297 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
|
| 298 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
| 299 |
+
index = 0
|
| 300 |
+
if os.path.isdir(save_directory):
|
| 301 |
+
vocab_file = os.path.join(
|
| 302 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
| 303 |
+
)
|
| 304 |
+
else:
|
| 305 |
+
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
|
| 306 |
+
with open(vocab_file, "w", encoding="utf-8") as writer:
|
| 307 |
+
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
|
| 308 |
+
if index != token_index:
|
| 309 |
+
logger.warning(
|
| 310 |
+
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
|
| 311 |
+
" Please check that the vocabulary is not corrupted!"
|
| 312 |
+
)
|
| 313 |
+
index = token_index
|
| 314 |
+
writer.write(token + "\n")
|
| 315 |
+
index += 1
|
| 316 |
+
return (vocab_file,)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
|
| 320 |
+
class BasicTokenizer(object):
|
| 321 |
+
"""
|
| 322 |
+
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
|
| 323 |
+
|
| 324 |
+
Args:
|
| 325 |
+
do_lower_case (`bool`, *optional*, defaults to `True`):
|
| 326 |
+
Whether or not to lowercase the input when tokenizing.
|
| 327 |
+
never_split (`Iterable`, *optional*):
|
| 328 |
+
Collection of tokens which will never be split during tokenization. Only has an effect when
|
| 329 |
+
`do_basic_tokenize=True`
|
| 330 |
+
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
|
| 331 |
+
Whether or not to tokenize Chinese characters.
|
| 332 |
+
|
| 333 |
+
This should likely be deactivated for Japanese (see this
|
| 334 |
+
[issue](https://github.com/huggingface/transformers/issues/328)).
|
| 335 |
+
strip_accents (`bool`, *optional*):
|
| 336 |
+
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
|
| 337 |
+
value for `lowercase` (as in the original BERT).
|
| 338 |
+
do_split_on_punc (`bool`, *optional*, defaults to `True`):
|
| 339 |
+
In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
|
| 340 |
+
the full context of the words, such as contractions.
|
| 341 |
+
"""
|
| 342 |
+
|
| 343 |
+
def __init__(
|
| 344 |
+
self,
|
| 345 |
+
do_lower_case=True,
|
| 346 |
+
never_split=None,
|
| 347 |
+
tokenize_chinese_chars=True,
|
| 348 |
+
strip_accents=None,
|
| 349 |
+
do_split_on_punc=True,
|
| 350 |
+
):
|
| 351 |
+
if never_split is None:
|
| 352 |
+
never_split = []
|
| 353 |
+
self.do_lower_case = do_lower_case
|
| 354 |
+
self.never_split = set(never_split)
|
| 355 |
+
self.tokenize_chinese_chars = tokenize_chinese_chars
|
| 356 |
+
self.strip_accents = strip_accents
|
| 357 |
+
self.do_split_on_punc = do_split_on_punc
|
| 358 |
+
|
| 359 |
+
def tokenize(self, text, never_split=None):
|
| 360 |
+
"""
|
| 361 |
+
Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
|
| 362 |
+
|
| 363 |
+
Args:
|
| 364 |
+
never_split (`List[str]`, *optional*)
|
| 365 |
+
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
|
| 366 |
+
[`PreTrainedTokenizer.tokenize`]) List of token not to split.
|
| 367 |
+
"""
|
| 368 |
+
# union() returns a new set by concatenating the two sets.
|
| 369 |
+
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
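The `tokenize` method above is greedy longest-match-first WordPiece: at each position it takes the longest vocabulary entry that matches, prefixes continuation pieces with `##`, and falls back to `unk_token` when no piece matches. A minimal sketch of that behaviour with a toy, hand-picked vocabulary (not a real checkpoint vocabulary; run inside this module so the module-level `whitespace_tokenize` helper is available):

```python
# Toy vocabulary chosen only to illustrate the greedy longest-match-first loop above.
toy_vocab = {"un", "##aff", "##able", "[UNK]"}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wordpiece.tokenize("xyzzy"))      # ['[UNK]'] -- no sub-token matches, so the whole token becomes unk_token
```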
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__init__.py
ADDED
@@ -0,0 +1,97 @@
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_flava": [
        "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "FlavaConfig",
        "FlavaImageCodebookConfig",
        "FlavaImageConfig",
        "FlavaMultimodalConfig",
        "FlavaTextConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_flava"] = ["FlavaFeatureExtractor"]
    _import_structure["image_processing_flava"] = ["FlavaImageProcessor"]
    _import_structure["processing_flava"] = ["FlavaProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flava"] = [
        "FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlavaForPreTraining",
        "FlavaImageCodebook",
        "FlavaImageModel",
        "FlavaModel",
        "FlavaMultimodalModel",
        "FlavaPreTrainedModel",
        "FlavaTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_flava import (
        FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        FlavaConfig,
        FlavaImageCodebookConfig,
        FlavaImageConfig,
        FlavaMultimodalConfig,
        FlavaTextConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_flava import FlavaFeatureExtractor
        from .image_processing_flava import FlavaImageProcessor
        from .processing_flava import FlavaProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flava import (
            FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlavaForPreTraining,
            FlavaImageCodebook,
            FlavaImageModel,
            FlavaModel,
            FlavaMultimodalModel,
            FlavaPreTrainedModel,
            FlavaTextModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
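The `_LazyModule` registration above means nothing heavy is imported at `import transformers` time; the names listed in `_import_structure` are only resolved on first attribute access, and the optional vision/torch branches decide which of them exist. A small usage sketch (assuming the package is installed; `FlavaConfig` itself needs neither torch nor vision):

```python
# First access triggers the lazy import of configuration_flava through _LazyModule.
from transformers import FlavaConfig

config = FlavaConfig()  # sub-configs are created with their default values
print(type(config.image_config).__name__)  # FlavaImageConfig
```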
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.5 kB)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_dalle_to_flava_codebook.cpython-310.pyc
ADDED
Binary file (2.58 kB)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_flava_original_pytorch_to_hf.cpython-310.pyc
ADDED
Binary file (3.31 kB)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/feature_extraction_flava.cpython-310.pyc
ADDED
Binary file (997 Bytes)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/image_processing_flava.cpython-310.pyc
ADDED
Binary file (27.3 kB)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/modeling_flava.cpython-310.pyc
ADDED
Binary file (67.3 kB)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/__pycache__/processing_flava.cpython-310.pyc
ADDED
Binary file (5.26 kB)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/configuration_flava.py
ADDED
@@ -0,0 +1,764 @@
# coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" FLAVA model configurations"""

import os
from typing import Any, Dict, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/flava-full": "https://huggingface.co/facebook/flava-full/resolve/main/config.json",
}


class FlavaImageConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate an
    FLAVA model according to the specified arguments, defining the model architecture.

    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        mask_token (`bool`, *optional*, defaults to `True`):
            Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA.
        vocab_size (`int`, *optional*, defaults to 8192):
            Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked
            Image Modeling) loss for FLAVA.

    Example:

    ```python
    >>> from transformers import FlavaImageConfig, FlavaImageModel

    >>> # Initializing a FlavaImageModel with style configuration
    >>> configuration = FlavaImageConfig()

    >>> # Initializing a FlavaImageModel model (with random weights) from the style configuration
    >>> model = FlavaImageModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "flava_image_model"

    def __init__(
        self,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.0,
        attention_probs_dropout_prob: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        patch_size: int = 16,
        num_channels: int = 3,
        qkv_bias: bool = True,
        mask_token: bool = True,
        vocab_size: int = 8192,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.mask_token = mask_token
        self.vocab_size = vocab_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the image config dict if we are loading from FlavaConfig
        if config_dict.get("model_type") == "flava":
            config_dict = config_dict["image_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class FlavaTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FlavaTextModel`]. It is used to instantiate an
    FLAVA model according to the specified arguments, defining the model architecture.

    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`FlavaTextModel`].
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`FlavaTextModel`]. Note that even though
            text encoder allows `token_type_ids`'s value as 2, for text-only pretraining and fine-tuning, only 1 is
            used similar to RoBERTa.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048). For VL, max_length passed to model is 77.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.

    Example:

    ```python
    >>> from transformers import FlavaTextConfig, FlavaTextModel

    >>> # Initializing a FlavaTextModel with style configuration
    >>> configuration = FlavaTextConfig()

    >>> # Initializing a FlavaTextModel model (with random weights) from the style configuration
    >>> model = FlavaTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "flava_text_model"

    def __init__(
        self,
        vocab_size: int = 30522,
        type_vocab_size: int = 2,
        max_position_embeddings: int = 512,
        position_embedding_type: str = "absolute",
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.0,
        attention_probs_dropout_prob: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        qkv_bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.type_vocab_size = type_vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.position_embedding_type = position_embedding_type
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from FlavaConfig
        if config_dict.get("model_type") == "flava":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class FlavaMultimodalConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate
    an FLAVA model according to the specified arguments, defining the model architecture.

    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        use_cls_token (`bool`, *optional*, defaults to `True`):
            Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model.


    Example:

    ```python
    >>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel

    >>> # Initializing a FlavaMultimodalModel with style configuration
    >>> configuration = FlavaMultimodalConfig()

    >>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration
    >>> model = FlavaMultimodalModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "flava_multimodal_model"

    def __init__(
        self,
        hidden_size: int = 768,
        num_hidden_layers: int = 6,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.0,
        attention_probs_dropout_prob: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        qkv_bias: bool = True,
        use_cls_token: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.use_cls_token = use_cls_token

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the multimodal config dict if we are loading from FlavaConfig
        if config_dict.get("model_type") == "flava":
            config_dict = config_dict["multimodal_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class FlavaImageCodebookConfig(PretrainedConfig):
    model_type = "flava_image_codebook"

    r"""
    [`FlavaImageCodebookConfig`] is the configuration class to store the configuration of a [`FlavaImageCodebook`]. It
    is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-image-codebook](https://huggingface.co/facebook/flava-image-codebook) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_groups (`int`, defaults to 4):
            Number of groups to be created. This parameter as of now doesn't affect the model and is used for some
            internal calculation and estimations.
        input_channels (`int`, defaults to 3):
            Number of channels in the image to be passed.
        num_blocks_per_group (`int`, defaults to 2):
            Number of conv-based blocks per group.
        hidden_size (`int`, defaults to 256):
            Size of hidden dim for the blocks.
        vocab_size (`int`, defaults to 8192):
            Size of the output vocabulary for the codebook.
        freeze (`bool`, defaults to `True`):
            Whether to freeze the weights of the model.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import FlavaImageCodebookConfig, FlavaImageCodebook

    >>> # Initializing a FlavaImageCodebook with style configuration
    >>> configuration = FlavaImageCodebookConfig()

    >>> # Initializing a FlavaImageCodebook model (with random weights) from the style configuration
    >>> model = FlavaImageCodebook(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    def __init__(
        self,
        num_groups: int = 4,
        input_channels: int = 3,
        num_blocks_per_group: int = 2,
        hidden_size: int = 256,
        vocab_size: int = 8192,
        freeze: bool = True,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_groups = num_groups
        self.input_channels = input_channels
        self.num_blocks_per_group = num_blocks_per_group
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.freeze = freeze
        self.initializer_range = initializer_range

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the image codebook config dict if we are loading from FlavaConfig
        if config_dict.get("model_type") == "flava":
            config_dict = config_dict["image_codebook_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class FlavaConfig(PretrainedConfig):
    r"""
    [`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to
    instantiate FLAVA model according to the specified arguments, defining the text model, image model, image codebook
    and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to
    that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaTextConfig`].
        image_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaImageConfig`].
        multimodal_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and image projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original FLAVA/CLIP
            implementation.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        ce_ignore_index (`int`, *optional*, defaults to -100):
            Cross entropy index to ignore.
        mim_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MIM (Masked Image Modeling) unimodal loss
        mlm_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MLM (Masked Language Modeling) unimodal loss
        global_contrastive_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to global contrastive cross-alignment loss.
        itm_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to image-text matching multimodal loss.
        mmm_image_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MMM loss's image part.
        mmm_text_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MMM loss's text part.
        global_backprop_contrastive (`bool`, *optional*, defaults to `True`):
            Whether to use global backpropagation through all workers in contrastive loss.
        skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`):
            Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses.
        return_loss (`bool`, *optional*, defaults to `True`):
            Whether to return loss or not

        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining

    >>> # Initializing a FlavaConfig with style configuration
    >>> configuration = FlavaConfig()

    >>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration
    >>> model = FlavaModel(configuration)
    >>> model_pre = FlavaForPreTraining(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> configuration_pre = model_pre.config
    ```
    """

    model_type = "flava"

    def __init__(
        self,
        image_config: Dict[str, Any] = None,
        text_config: Dict[str, Any] = None,
        multimodal_config: Dict[str, Any] = None,
        image_codebook_config: Dict[str, Any] = None,
        hidden_size: int = 768,
        layer_norm_eps: float = 1e-12,
        projection_dim: int = 768,
        init_codebook: bool = True,
        logit_scale_init_value: float = 2.6592,
        initializer_range: float = 0.02,
        ce_ignore_index: int = -100,
        mim_weight: float = 1.0,
        mlm_weight: float = 1.0,
        global_contrastive_weight: float = 1.0,
        itm_weight: float = 1.0,
        mmm_image_weight: float = 1.0,
        mmm_text_weight: float = 1.0,
        global_backprop_contrastive: bool = True,
        skip_unmasked_multimodal_encoder: bool = True,
        return_loss: bool = True,
        **kwargs,
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        image_config_dict = kwargs.pop("image_config_dict", None)
        multimodal_config_dict = kwargs.pop("multimodal_config_dict", None)
        image_codebook_config_dict = kwargs.pop("image_codebook_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = FlavaTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if image_config_dict is not None:
            if image_config is None:
                image_config = {}

            # This is the complete result when using `image_config_dict`.
            _image_config_dict = FlavaImageConfig(**image_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _image_config_dict:
                _image_config_dict["id2label"] = {
                    str(key): value for key, value in _image_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_image_config_dict` and `image_config` but being different.
            for key, value in _image_config_dict.items():
                if key in image_config and value != image_config[key] and key not in ["transformers_version"]:
                    # If specified in `image_config_dict`
                    if key in image_config_dict:
                        message = (
                            f"`{key}` is found in both `image_config_dict` and `image_config` but with different "
                            f'values. The value `image_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. "
                            f'The value `image_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `image_config` with the ones in `_image_config_dict`.
            image_config.update(_image_config_dict)

        if multimodal_config_dict is not None:
            if multimodal_config is None:
                multimodal_config = {}

            # This is the complete result when using `multimodal_config_dict`.
            _multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict()

            # Give a warning if the values exist in both `_multimodal_config_dict` and `multimodal_config` but being
            # different.
            for key, value in _multimodal_config_dict.items():
                if (
                    key in multimodal_config
                    and value != multimodal_config[key]
                    and key not in ["transformers_version"]
                ):
                    # If specified in `multimodal_config_dict`
                    if key in multimodal_config_dict:
                        message = (
                            f"`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with "
                            f'different values. The value `multimodal_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`multimodal_config_dict` is provided which will be used to initialize "
                            f'`FlavaMultimodalConfig`. The value `multimodal_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `multimodal_config` with the ones in `_multimodal_config_dict`.
            multimodal_config.update(_multimodal_config_dict)

        if image_codebook_config_dict is not None:
            if image_codebook_config is None:
                image_codebook_config = {}

            # This is the complete result when using `image_codebook_config_dict`.
            _image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict()

            # Give a warning if the values exist in both `_image_codebook_config_dict` and `image_codebook_config` but
            # being different.
            for key, value in _image_codebook_config_dict.items():
                if (
                    key in image_codebook_config
                    and value != image_codebook_config[key]
                    and key not in ["transformers_version"]
                ):
                    # If specified in `image_codebook_config_dict`
                    if key in image_codebook_config_dict:
                        message = (
                            f"`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but "
                            f'with different values. The value `image_codebook_config_dict["{key}"]` will be used '
                            "instead."
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`image_codebook_config_dict` is provided which will be used to initialize "
                            f'`FlavaImageCodebookConfig`. The value `image_codebook_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `image_codebook_config` with the ones in `_image_codebook_config_dict`.
            image_codebook_config.update(_image_codebook_config_dict)

        if image_config is None:
            image_config = {}
            logger.info("`image_config` is `None`. initializing the `FlavaImageConfig` with default values.")

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.")

        if multimodal_config is None:
            multimodal_config = {}
            logger.info("`multimodal_config` is `None`. initializing the `FlavaMultimodalConfig` with default values.")

        if image_codebook_config is None:
            image_codebook_config = {}
            logger.info(
                "`image_codebook_config` is `None`. initializing the `FlavaImageCodebookConfig` with default values."
            )

        self.image_config = FlavaImageConfig(**image_config)
        self.text_config = FlavaTextConfig(**text_config)
        self.multimodal_config = FlavaMultimodalConfig(**multimodal_config)
        self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config)
        self.projection_dim = projection_dim
        self.init_codebook = init_codebook

        self.hidden_size = hidden_size
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.ce_ignore_index = ce_ignore_index
        self.mim_weight = mim_weight
        self.mlm_weight = mlm_weight
        self.global_contrastive_weight = global_contrastive_weight
        self.itm_weight = itm_weight
        self.mmm_image_weight = mmm_image_weight
        self.mmm_text_weight = mmm_text_weight
        self.global_backprop_contrastive = global_backprop_contrastive
        self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder
        self.return_loss = return_loss

    @classmethod
    def from_configs(
        cls,
        image_config: FlavaImageConfig,
        text_config: FlavaTextConfig,
        multimodal_config: FlavaMultimodalConfig,
        image_codebook_config: FlavaImageCodebookConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`FlavaConfig`] (or a derived class) from flava text model configuration, flava image model
        configuration, flava multimodal model and flava codebook model configuration.

        Returns:
            [`FlavaConfig`]: An instance of a configuration object
        """

        return cls(
            image_config=image_config.to_dict(),
            text_config=text_config.to_dict(),
            multimodal_config=multimodal_config.to_dict(),
            image_codebook_config=image_codebook_config.to_dict(),
            **kwargs,
        )
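`from_configs` simply serializes each sub-config with `to_dict()` and routes the result back through `__init__`, so a composed `FlavaConfig` can be assembled from individually customized sub-configs. A short sketch (the overridden values below are arbitrary and chosen only for illustration):

```python
from transformers import (
    FlavaConfig,
    FlavaImageCodebookConfig,
    FlavaImageConfig,
    FlavaMultimodalConfig,
    FlavaTextConfig,
)

# Build each sub-config, overriding a couple of defaults for illustration.
image_config = FlavaImageConfig(image_size=224, patch_size=16)
text_config = FlavaTextConfig(vocab_size=30522, max_position_embeddings=512)
multimodal_config = FlavaMultimodalConfig(num_hidden_layers=6)
codebook_config = FlavaImageCodebookConfig(vocab_size=8192)

config = FlavaConfig.from_configs(image_config, text_config, multimodal_config, codebook_config)
assert config.image_config.patch_size == 16
```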
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/convert_dalle_to_flava_codebook.py
ADDED
@@ -0,0 +1,102 @@
# coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/convert_flava_original_pytorch_to_hf.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
import argparse
|
| 17 |
+
import os
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
|
| 21 |
+
from transformers import FlavaConfig, FlavaForPreTraining
|
| 22 |
+
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def count_parameters(state_dict):
|
| 26 |
+
# encoder.embeddings are double copied in original FLAVA
|
| 27 |
+
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def upgrade_state_dict(state_dict, codebook_state_dict):
|
| 31 |
+
upgrade = {}
|
| 32 |
+
|
| 33 |
+
for key, value in state_dict.items():
|
| 34 |
+
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
|
| 35 |
+
continue
|
| 36 |
+
|
| 37 |
+
key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
|
| 38 |
+
key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
|
| 39 |
+
key = key.replace("heads.cmd.itm_head.cls", "itm_head")
|
| 40 |
+
key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
|
| 41 |
+
key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
|
| 42 |
+
key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
|
| 43 |
+
key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
|
| 44 |
+
key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
|
| 45 |
+
key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
|
| 46 |
+
key = key.replace("image_encoder.module", "flava.image_model")
|
| 47 |
+
key = key.replace("text_encoder.module", "flava.text_model")
|
| 48 |
+
key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
|
| 49 |
+
key = key.replace("mm_encoder.module", "flava.multimodal_model")
|
| 50 |
+
key = key.replace("text_projection", "flava.text_projection")
|
| 51 |
+
key = key.replace("image_projection", "flava.image_projection")
|
| 52 |
+
|
| 53 |
+
upgrade[key] = value.float()
|
| 54 |
+
|
| 55 |
+
for key, value in codebook_state_dict.items():
|
| 56 |
+
upgrade[f"image_codebook.{key}"] = value
|
| 57 |
+
|
| 58 |
+
return upgrade
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@torch.no_grad()
|
| 62 |
+
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
|
| 63 |
+
"""
|
| 64 |
+
Copy/paste/tweak model's weights to transformers design.
|
| 65 |
+
"""
|
| 66 |
+
if config_path is not None:
|
| 67 |
+
config = FlavaConfig.from_pretrained(config_path)
|
| 68 |
+
else:
|
| 69 |
+
config = FlavaConfig()
|
| 70 |
+
|
| 71 |
+
hf_model = FlavaForPreTraining(config).eval()
|
| 72 |
+
|
| 73 |
+
codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
|
| 74 |
+
|
| 75 |
+
if os.path.exists(checkpoint_path):
|
| 76 |
+
state_dict = torch.load(checkpoint_path, map_location="cpu")
|
| 77 |
+
else:
|
| 78 |
+
state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
|
| 79 |
+
|
| 80 |
+
hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
|
| 81 |
+
hf_model.load_state_dict(hf_state_dict)
|
| 82 |
+
hf_state_dict = hf_model.state_dict()
|
| 83 |
+
hf_count = count_parameters(hf_state_dict)
|
| 84 |
+
state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
|
| 85 |
+
|
| 86 |
+
assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
|
| 87 |
+
|
| 88 |
+
hf_model.save_pretrained(pytorch_dump_folder_path)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
if __name__ == "__main__":
|
| 92 |
+
parser = argparse.ArgumentParser()
|
| 93 |
+
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
|
| 94 |
+
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
|
| 95 |
+
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
|
| 96 |
+
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
|
| 97 |
+
args = parser.parse_args()
|
| 98 |
+
|
| 99 |
+
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
|
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/feature_extraction_flava.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Feature extractor class for FLAVA."""
|
| 16 |
+
|
| 17 |
+
import warnings
|
| 18 |
+
|
| 19 |
+
from ...utils import logging
|
| 20 |
+
from .image_processing_flava import FlavaImageProcessor
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
logger = logging.get_logger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class FlavaFeatureExtractor(FlavaImageProcessor):
|
| 27 |
+
def __init__(self, *args, **kwargs) -> None:
|
| 28 |
+
warnings.warn(
|
| 29 |
+
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
|
| 30 |
+
" use FlavaImageProcessor instead.",
|
| 31 |
+
FutureWarning,
|
| 32 |
+
)
|
| 33 |
+
super().__init__(*args, **kwargs)
|
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/image_processing_flava.py
ADDED
|
@@ -0,0 +1,694 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Image processor class for Flava."""
|
| 16 |
+
|
| 17 |
+
import math
|
| 18 |
+
import random
|
| 19 |
+
from functools import lru_cache
|
| 20 |
+
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
|
| 24 |
+
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
|
| 25 |
+
from ...image_transforms import resize, to_channel_dimension_format
|
| 26 |
+
from ...image_utils import (
|
| 27 |
+
OPENAI_CLIP_MEAN,
|
| 28 |
+
OPENAI_CLIP_STD,
|
| 29 |
+
ChannelDimension,
|
| 30 |
+
ImageInput,
|
| 31 |
+
PILImageResampling,
|
| 32 |
+
infer_channel_dimension_format,
|
| 33 |
+
is_scaled_image,
|
| 34 |
+
make_list_of_images,
|
| 35 |
+
to_numpy_array,
|
| 36 |
+
valid_images,
|
| 37 |
+
)
|
| 38 |
+
from ...utils import TensorType, is_vision_available, logging
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
if is_vision_available():
|
| 42 |
+
import PIL
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
logger = logging.get_logger(__name__)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# These values are taken from CLIP
|
| 49 |
+
FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN
|
| 50 |
+
FLAVA_IMAGE_STD = OPENAI_CLIP_STD
|
| 51 |
+
FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0]
|
| 52 |
+
FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0]
|
| 53 |
+
LOGIT_LAPLACE_EPS: float = 0.1
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# Inspired from https://github.com/microsoft/unilm/blob/master/beit/masking_generator.py
|
| 57 |
+
class FlavaMaskingGenerator:
|
| 58 |
+
def __init__(
|
| 59 |
+
self,
|
| 60 |
+
input_size: Union[int, Tuple[int, int]] = 14,
|
| 61 |
+
total_mask_patches: int = 75,
|
| 62 |
+
mask_group_max_patches: Optional[int] = None,
|
| 63 |
+
mask_group_min_patches: int = 16,
|
| 64 |
+
mask_group_min_aspect_ratio: Optional[float] = 0.3,
|
| 65 |
+
mask_group_max_aspect_ratio: float = None,
|
| 66 |
+
):
|
| 67 |
+
if not isinstance(input_size, tuple):
|
| 68 |
+
input_size = (input_size,) * 2
|
| 69 |
+
self.height, self.width = input_size
|
| 70 |
+
|
| 71 |
+
self.num_patches = self.height * self.width
|
| 72 |
+
self.total_mask_patches = total_mask_patches
|
| 73 |
+
|
| 74 |
+
self.mask_group_min_patches = mask_group_min_patches
|
| 75 |
+
self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches
|
| 76 |
+
|
| 77 |
+
mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio
|
| 78 |
+
self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio))
|
| 79 |
+
|
| 80 |
+
def __repr__(self):
|
| 81 |
+
repr_str = "MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
|
| 82 |
+
self.height,
|
| 83 |
+
self.width,
|
| 84 |
+
self.mask_group_min_patches,
|
| 85 |
+
self.mask_group_max_patches,
|
| 86 |
+
self.total_mask_patches,
|
| 87 |
+
self.log_aspect_ratio[0],
|
| 88 |
+
self.log_aspect_ratio[1],
|
| 89 |
+
)
|
| 90 |
+
return repr_str
|
| 91 |
+
|
| 92 |
+
def get_shape(self):
|
| 93 |
+
return self.height, self.width
|
| 94 |
+
|
| 95 |
+
def _mask(self, mask, max_mask_patches):
|
| 96 |
+
delta = 0
|
| 97 |
+
for _attempt in range(10):
|
| 98 |
+
target_area = random.uniform(self.mask_group_min_patches, max_mask_patches)
|
| 99 |
+
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
|
| 100 |
+
height = int(round(math.sqrt(target_area * aspect_ratio)))
|
| 101 |
+
width = int(round(math.sqrt(target_area / aspect_ratio)))
|
| 102 |
+
if width < self.width and height < self.height:
|
| 103 |
+
top = random.randint(0, self.height - height)
|
| 104 |
+
left = random.randint(0, self.width - width)
|
| 105 |
+
|
| 106 |
+
num_masked = mask[top : top + height, left : left + width].sum()
|
| 107 |
+
# Overlap
|
| 108 |
+
if 0 < height * width - num_masked <= max_mask_patches:
|
| 109 |
+
for i in range(top, top + height):
|
| 110 |
+
for j in range(left, left + width):
|
| 111 |
+
if mask[i, j] == 0:
|
| 112 |
+
mask[i, j] = 1
|
| 113 |
+
delta += 1
|
| 114 |
+
|
| 115 |
+
if delta > 0:
|
| 116 |
+
break
|
| 117 |
+
return delta
|
| 118 |
+
|
| 119 |
+
def __call__(self):
|
| 120 |
+
mask = np.zeros(shape=self.get_shape(), dtype=int)
|
| 121 |
+
mask_count = 0
|
| 122 |
+
while mask_count < self.total_mask_patches:
|
| 123 |
+
max_mask_patches = self.total_mask_patches - mask_count
|
| 124 |
+
max_mask_patches = min(max_mask_patches, self.mask_group_max_patches)
|
| 125 |
+
|
| 126 |
+
delta = self._mask(mask, max_mask_patches)
|
| 127 |
+
if delta == 0:
|
| 128 |
+
break
|
| 129 |
+
else:
|
| 130 |
+
mask_count += delta
|
| 131 |
+
|
| 132 |
+
return mask
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class FlavaImageProcessor(BaseImageProcessor):
|
| 136 |
+
r"""
|
| 137 |
+
Constructs a Flava image processor.
|
| 138 |
+
|
| 139 |
+
Args:
|
| 140 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
| 141 |
+
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
|
| 142 |
+
`do_resize` parameter in `preprocess`.
|
| 143 |
+
size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
|
| 144 |
+
Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
|
| 145 |
+
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
|
| 146 |
+
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
|
| 147 |
+
`preprocess`.
|
| 148 |
+
do_center_crop (`bool`, *optional*, defaults to `True`):
|
| 149 |
+
Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
|
| 150 |
+
crop_size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
|
| 151 |
+
Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
|
| 152 |
+
`crop_size` parameter in `preprocess`.
|
| 153 |
+
do_rescale (`bool`, *optional*, defaults to `True`):
|
| 154 |
+
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
|
| 155 |
+
parameter in `preprocess`.
|
| 156 |
+
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
|
| 157 |
+
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
|
| 158 |
+
`preprocess`.
|
| 159 |
+
do_normalize (`bool`, *optional*, defaults to `True`):
|
| 160 |
+
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
|
| 161 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
|
| 162 |
+
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
|
| 163 |
+
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
|
| 164 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
|
| 165 |
+
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
|
| 166 |
+
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
|
| 167 |
+
return_image_mask (`bool`, *optional*, defaults to `False`):
|
| 168 |
+
Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
|
| 169 |
+
input_size_patches (`int`, *optional*, defaults to 14):
|
| 170 |
+
Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
|
| 171 |
+
by the `input_size_patches` parameter in `preprocess`.
|
| 172 |
+
total_mask_patches (`int`, *optional*, defaults to 75):
|
| 173 |
+
Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
|
| 174 |
+
`preprocess`.
|
| 175 |
+
mask_group_min_patches (`int`, *optional*, defaults to 16):
|
| 176 |
+
Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
|
| 177 |
+
parameter in `preprocess`.
|
| 178 |
+
mask_group_max_patches (`int`, *optional*):
|
| 179 |
+
Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches`
|
| 180 |
+
parameter in `preprocess`.
|
| 181 |
+
mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
|
| 182 |
+
Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
|
| 183 |
+
in `preprocess`.
|
| 184 |
+
mask_group_max_aspect_ratio (`float`, *optional*):
|
| 185 |
+
Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
|
| 186 |
+
in `preprocess`.
|
| 187 |
+
codebook_do_resize (`bool`, *optional*, defaults to `True`):
|
| 188 |
+
Whether to resize the input for codebook to a certain. Can be overridden by the `codebook_do_resize`
|
| 189 |
+
parameter in `preprocess`. `codebook_size`.
|
| 190 |
+
codebook_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
|
| 191 |
+
Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
|
| 192 |
+
`preprocess`.
|
| 193 |
+
codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
|
| 194 |
+
Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
|
| 195 |
+
parameter in `preprocess`.
|
| 196 |
+
codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
|
| 197 |
+
Whether to crop the input for codebook at the center. If the input size is smaller than
|
| 198 |
+
`codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
|
| 199 |
+
overridden by the `codebook_do_center_crop` parameter in `preprocess`.
|
| 200 |
+
codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
|
| 201 |
+
Desired output size for codebook input when applying center-cropping. Can be overridden by the
|
| 202 |
+
`codebook_crop_size` parameter in `preprocess`.
|
| 203 |
+
codebook_do_rescale (`bool`, *optional*, defaults to `True`):
|
| 204 |
+
Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
|
| 205 |
+
overridden by the `codebook_do_rescale` parameter in `preprocess`.
|
| 206 |
+
codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
|
| 207 |
+
Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
|
| 208 |
+
`codebook_rescale_factor` parameter in `preprocess`.
|
| 209 |
+
codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
|
| 210 |
+
Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
|
| 211 |
+
`codebook_do_map_pixels` parameter in `preprocess`.
|
| 212 |
+
codebook_do_normalize (`bool`, *optional*, defaults to `True`):
|
| 213 |
+
Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can
|
| 214 |
+
be overridden by the `codebook_do_normalize` parameter in `preprocess`.
|
| 215 |
+
codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
|
| 216 |
+
The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
|
| 217 |
+
by the `codebook_image_mean` parameter in `preprocess`.
|
| 218 |
+
codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
|
| 219 |
+
The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
|
| 220 |
+
be overridden by the `codebook_image_std` parameter in `preprocess`.
|
| 221 |
+
"""
|
| 222 |
+
|
| 223 |
+
model_input_names = ["pixel_values"]
|
| 224 |
+
|
| 225 |
+
def __init__(
|
| 226 |
+
self,
|
| 227 |
+
do_resize: bool = True,
|
| 228 |
+
size: Dict[str, int] = None,
|
| 229 |
+
resample: PILImageResampling = PILImageResampling.BICUBIC,
|
| 230 |
+
do_center_crop: bool = True,
|
| 231 |
+
crop_size: Dict[str, int] = None,
|
| 232 |
+
do_rescale: bool = True,
|
| 233 |
+
rescale_factor: Union[int, float] = 1 / 255,
|
| 234 |
+
do_normalize: bool = True,
|
| 235 |
+
image_mean: Optional[Union[float, Iterable[float]]] = None,
|
| 236 |
+
image_std: Optional[Union[float, Iterable[float]]] = None,
|
| 237 |
+
# Mask related params
|
| 238 |
+
return_image_mask: bool = False,
|
| 239 |
+
input_size_patches: int = 14,
|
| 240 |
+
total_mask_patches: int = 75,
|
| 241 |
+
mask_group_min_patches: int = 16,
|
| 242 |
+
mask_group_max_patches: Optional[int] = None,
|
| 243 |
+
mask_group_min_aspect_ratio: float = 0.3,
|
| 244 |
+
mask_group_max_aspect_ratio: Optional[float] = None,
|
| 245 |
+
# Codebook related params
|
| 246 |
+
return_codebook_pixels: bool = False,
|
| 247 |
+
codebook_do_resize: bool = True,
|
| 248 |
+
codebook_size: bool = None,
|
| 249 |
+
codebook_resample: int = PILImageResampling.LANCZOS,
|
| 250 |
+
codebook_do_center_crop: bool = True,
|
| 251 |
+
codebook_crop_size: int = None,
|
| 252 |
+
codebook_do_rescale: bool = True,
|
| 253 |
+
codebook_rescale_factor: Union[int, float] = 1 / 255,
|
| 254 |
+
codebook_do_map_pixels: bool = True,
|
| 255 |
+
codebook_do_normalize: bool = True,
|
| 256 |
+
codebook_image_mean: Optional[Union[float, Iterable[float]]] = None,
|
| 257 |
+
codebook_image_std: Optional[Union[float, Iterable[float]]] = None,
|
| 258 |
+
**kwargs,
|
| 259 |
+
) -> None:
|
| 260 |
+
super().__init__(**kwargs)
|
| 261 |
+
size = size if size is not None else {"height": 224, "width": 224}
|
| 262 |
+
size = get_size_dict(size)
|
| 263 |
+
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
|
| 264 |
+
crop_size = get_size_dict(crop_size, param_name="crop_size")
|
| 265 |
+
|
| 266 |
+
codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
|
| 267 |
+
codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
|
| 268 |
+
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
|
| 269 |
+
codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
|
| 270 |
+
|
| 271 |
+
self.do_resize = do_resize
|
| 272 |
+
self.size = size
|
| 273 |
+
self.resample = resample
|
| 274 |
+
self.do_rescale = do_rescale
|
| 275 |
+
self.rescale_factor = rescale_factor
|
| 276 |
+
self.do_center_crop = do_center_crop
|
| 277 |
+
self.crop_size = crop_size
|
| 278 |
+
self.do_normalize = do_normalize
|
| 279 |
+
self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN
|
| 280 |
+
self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD
|
| 281 |
+
|
| 282 |
+
self.return_image_mask = return_image_mask
|
| 283 |
+
self.input_size_patches = input_size_patches
|
| 284 |
+
self.total_mask_patches = total_mask_patches
|
| 285 |
+
self.mask_group_min_patches = mask_group_min_patches
|
| 286 |
+
self.mask_group_max_patches = mask_group_max_patches
|
| 287 |
+
self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
|
| 288 |
+
self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio
|
| 289 |
+
|
| 290 |
+
self.return_codebook_pixels = return_codebook_pixels
|
| 291 |
+
self.codebook_do_resize = codebook_do_resize
|
| 292 |
+
self.codebook_size = codebook_size
|
| 293 |
+
self.codebook_resample = codebook_resample
|
| 294 |
+
self.codebook_do_center_crop = codebook_do_center_crop
|
| 295 |
+
self.codebook_crop_size = codebook_crop_size
|
| 296 |
+
self.codebook_do_rescale = codebook_do_rescale
|
| 297 |
+
self.codebook_rescale_factor = codebook_rescale_factor
|
| 298 |
+
self.codebook_do_map_pixels = codebook_do_map_pixels
|
| 299 |
+
self.codebook_do_normalize = codebook_do_normalize
|
| 300 |
+
self.codebook_image_mean = codebook_image_mean
|
| 301 |
+
self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN
|
| 302 |
+
self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD
|
| 303 |
+
|
| 304 |
+
@classmethod
|
| 305 |
+
def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
|
| 306 |
+
"""
|
| 307 |
+
Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
|
| 308 |
+
created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`
|
| 309 |
+
"""
|
| 310 |
+
image_processor_dict = image_processor_dict.copy()
|
| 311 |
+
if "codebook_size" in kwargs:
|
| 312 |
+
image_processor_dict["codebook_size"] = kwargs.pop("codebook_size")
|
| 313 |
+
if "codebook_crop_size" in kwargs:
|
| 314 |
+
image_processor_dict["codebook_crop_size"] = kwargs.pop("codebook_crop_size")
|
| 315 |
+
return super().from_dict(image_processor_dict, **kwargs)
|
| 316 |
+
|
| 317 |
+
@lru_cache()
|
| 318 |
+
def masking_generator(
|
| 319 |
+
self,
|
| 320 |
+
input_size_patches,
|
| 321 |
+
total_mask_patches,
|
| 322 |
+
mask_group_min_patches,
|
| 323 |
+
mask_group_max_patches,
|
| 324 |
+
mask_group_min_aspect_ratio,
|
| 325 |
+
mask_group_max_aspect_ratio,
|
| 326 |
+
) -> FlavaMaskingGenerator:
|
| 327 |
+
return FlavaMaskingGenerator(
|
| 328 |
+
input_size=input_size_patches,
|
| 329 |
+
total_mask_patches=total_mask_patches,
|
| 330 |
+
mask_group_min_patches=mask_group_min_patches,
|
| 331 |
+
mask_group_max_patches=mask_group_max_patches,
|
| 332 |
+
mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
|
| 333 |
+
mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
|
| 337 |
+
def resize(
|
| 338 |
+
self,
|
| 339 |
+
image: np.ndarray,
|
| 340 |
+
size: Dict[str, int],
|
| 341 |
+
resample: PILImageResampling = PILImageResampling.BICUBIC,
|
| 342 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 343 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 344 |
+
**kwargs,
|
| 345 |
+
) -> np.ndarray:
|
| 346 |
+
"""
|
| 347 |
+
Resize an image to `(size["height"], size["width"])`.
|
| 348 |
+
|
| 349 |
+
Args:
|
| 350 |
+
image (`np.ndarray`):
|
| 351 |
+
Image to resize.
|
| 352 |
+
size (`Dict[str, int]`):
|
| 353 |
+
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
|
| 354 |
+
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
|
| 355 |
+
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
|
| 356 |
+
data_format (`ChannelDimension` or `str`, *optional*):
|
| 357 |
+
The channel dimension format for the output image. If unset, the channel dimension format of the input
|
| 358 |
+
image is used. Can be one of:
|
| 359 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
| 360 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
| 361 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
| 362 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
| 363 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
| 364 |
+
from the input image. Can be one of:
|
| 365 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
| 366 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
| 367 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
| 368 |
+
|
| 369 |
+
Returns:
|
| 370 |
+
`np.ndarray`: The resized image.
|
| 371 |
+
"""
|
| 372 |
+
size = get_size_dict(size)
|
| 373 |
+
if "height" not in size or "width" not in size:
|
| 374 |
+
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
|
| 375 |
+
output_size = (size["height"], size["width"])
|
| 376 |
+
return resize(
|
| 377 |
+
image,
|
| 378 |
+
size=output_size,
|
| 379 |
+
resample=resample,
|
| 380 |
+
data_format=data_format,
|
| 381 |
+
input_data_format=input_data_format,
|
| 382 |
+
**kwargs,
|
| 383 |
+
)
|
| 384 |
+
|
| 385 |
+
def map_pixels(self, image: np.ndarray) -> np.ndarray:
|
| 386 |
+
return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS
|
| 387 |
+
|
| 388 |
+
def _preprocess_image(
|
| 389 |
+
self,
|
| 390 |
+
image: ImageInput,
|
| 391 |
+
do_resize: bool = None,
|
| 392 |
+
size: Dict[str, int] = None,
|
| 393 |
+
resample: PILImageResampling = None,
|
| 394 |
+
do_center_crop: bool = None,
|
| 395 |
+
crop_size: Dict[str, int] = None,
|
| 396 |
+
do_rescale: bool = None,
|
| 397 |
+
rescale_factor: float = None,
|
| 398 |
+
do_normalize: bool = None,
|
| 399 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
| 400 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
| 401 |
+
do_map_pixels: bool = None,
|
| 402 |
+
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
|
| 403 |
+
input_data_format: Optional[ChannelDimension] = None,
|
| 404 |
+
) -> np.ndarray:
|
| 405 |
+
"""Preprocesses a single image."""
|
| 406 |
+
if do_resize and size is None or resample is None:
|
| 407 |
+
raise ValueError("Size and resample must be specified if do_resize is True.")
|
| 408 |
+
|
| 409 |
+
if do_rescale and rescale_factor is None:
|
| 410 |
+
raise ValueError("Rescale factor must be specified if do_rescale is True.")
|
| 411 |
+
|
| 412 |
+
if do_normalize and (image_mean is None or image_std is None):
|
| 413 |
+
raise ValueError("Image mean and std must be specified if do_normalize is True.")
|
| 414 |
+
|
| 415 |
+
# All transformations expect numpy arrays.
|
| 416 |
+
image = to_numpy_array(image)
|
| 417 |
+
|
| 418 |
+
if is_scaled_image(image) and do_rescale:
|
| 419 |
+
logger.warning_once(
|
| 420 |
+
"It looks like you are trying to rescale already rescaled images. If the input"
|
| 421 |
+
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
|
| 422 |
+
)
|
| 423 |
+
|
| 424 |
+
if input_data_format is None:
|
| 425 |
+
# We assume that all images have the same channel dimension format.
|
| 426 |
+
input_data_format = infer_channel_dimension_format(image)
|
| 427 |
+
|
| 428 |
+
if do_resize:
|
| 429 |
+
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
|
| 430 |
+
|
| 431 |
+
if do_center_crop:
|
| 432 |
+
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
|
| 433 |
+
|
| 434 |
+
if do_rescale:
|
| 435 |
+
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
|
| 436 |
+
|
| 437 |
+
if do_normalize:
|
| 438 |
+
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
|
| 439 |
+
|
| 440 |
+
if do_map_pixels:
|
| 441 |
+
image = self.map_pixels(image)
|
| 442 |
+
|
| 443 |
+
if data_format is not None:
|
| 444 |
+
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
|
| 445 |
+
return image
|
| 446 |
+
|
| 447 |
+
def preprocess(
|
| 448 |
+
self,
|
| 449 |
+
images: ImageInput,
|
| 450 |
+
do_resize: Optional[bool] = None,
|
| 451 |
+
size: Dict[str, int] = None,
|
| 452 |
+
resample: PILImageResampling = None,
|
| 453 |
+
do_center_crop: Optional[bool] = None,
|
| 454 |
+
crop_size: Optional[Dict[str, int]] = None,
|
| 455 |
+
do_rescale: Optional[bool] = None,
|
| 456 |
+
rescale_factor: Optional[float] = None,
|
| 457 |
+
do_normalize: Optional[bool] = None,
|
| 458 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
| 459 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
| 460 |
+
# Mask related params
|
| 461 |
+
return_image_mask: Optional[bool] = None,
|
| 462 |
+
input_size_patches: Optional[int] = None,
|
| 463 |
+
total_mask_patches: Optional[int] = None,
|
| 464 |
+
mask_group_min_patches: Optional[int] = None,
|
| 465 |
+
mask_group_max_patches: Optional[int] = None,
|
| 466 |
+
mask_group_min_aspect_ratio: Optional[float] = None,
|
| 467 |
+
mask_group_max_aspect_ratio: Optional[float] = None,
|
| 468 |
+
# Codebook related params
|
| 469 |
+
return_codebook_pixels: Optional[bool] = None,
|
| 470 |
+
codebook_do_resize: Optional[bool] = None,
|
| 471 |
+
codebook_size: Optional[Dict[str, int]] = None,
|
| 472 |
+
codebook_resample: Optional[int] = None,
|
| 473 |
+
codebook_do_center_crop: Optional[bool] = None,
|
| 474 |
+
codebook_crop_size: Optional[Dict[str, int]] = None,
|
| 475 |
+
codebook_do_rescale: Optional[bool] = None,
|
| 476 |
+
codebook_rescale_factor: Optional[float] = None,
|
| 477 |
+
codebook_do_map_pixels: Optional[bool] = None,
|
| 478 |
+
codebook_do_normalize: Optional[bool] = None,
|
| 479 |
+
codebook_image_mean: Optional[Iterable[float]] = None,
|
| 480 |
+
codebook_image_std: Optional[Iterable[float]] = None,
|
| 481 |
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
| 482 |
+
data_format: ChannelDimension = ChannelDimension.FIRST,
|
| 483 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
| 484 |
+
**kwargs,
|
| 485 |
+
) -> PIL.Image.Image:
|
| 486 |
+
"""
|
| 487 |
+
Preprocess an image or batch of images.
|
| 488 |
+
|
| 489 |
+
Args:
|
| 490 |
+
images (`ImageInput`):
|
| 491 |
+
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
|
| 492 |
+
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
|
| 493 |
+
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
|
| 494 |
+
Whether to resize the image.
|
| 495 |
+
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
|
| 496 |
+
Size of the image.
|
| 497 |
+
resample (`int`, *optional*, defaults to `self.resample`):
|
| 498 |
+
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
|
| 499 |
+
has an effect if `do_resize` is set to `True`.
|
| 500 |
+
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
|
| 501 |
+
Whether to center crop the image.
|
| 502 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
|
| 503 |
+
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
|
| 504 |
+
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
|
| 505 |
+
Whether to rescale the image values between [0 - 1].
|
| 506 |
+
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
|
| 507 |
+
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
|
| 508 |
+
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
|
| 509 |
+
Whether to normalize the image.
|
| 510 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
|
| 511 |
+
Image mean.
|
| 512 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
|
| 513 |
+
Image standard deviation.
|
| 514 |
+
return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
|
| 515 |
+
Whether to return the image mask.
|
| 516 |
+
input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
|
| 517 |
+
Size of the patches to extract from the image.
|
| 518 |
+
total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
|
| 519 |
+
Total number of patches to extract from the image.
|
| 520 |
+
mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
|
| 521 |
+
Minimum number of patches to extract from the image.
|
| 522 |
+
mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
|
| 523 |
+
Maximum number of patches to extract from the image.
|
| 524 |
+
mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
|
| 525 |
+
Minimum aspect ratio of the patches to extract from the image.
|
| 526 |
+
mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
|
| 527 |
+
Maximum aspect ratio of the patches to extract from the image.
|
| 528 |
+
return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
|
| 529 |
+
Whether to return the codebook pixels.
|
| 530 |
+
codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
|
| 531 |
+
Whether to resize the codebook pixels.
|
| 532 |
+
codebook_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_size`):
|
| 533 |
+
Size of the codebook pixels.
|
| 534 |
+
codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
|
| 535 |
+
Resampling filter to use if resizing the codebook pixels. This can be one of the enum
|
| 536 |
+
`PILImageResampling`, Only has an effect if `codebook_do_resize` is set to `True`.
|
| 537 |
+
codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
|
| 538 |
+
Whether to center crop the codebook pixels.
|
| 539 |
+
codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
|
| 540 |
+
Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
|
| 541 |
+
to `True`.
|
| 542 |
+
codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
|
| 543 |
+
Whether to rescale the codebook pixels values between [0 - 1].
|
| 544 |
+
codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
|
| 545 |
+
Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
|
| 546 |
+
codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
|
| 547 |
+
Whether to map the codebook pixels values.
|
| 548 |
+
codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
|
| 549 |
+
Whether to normalize the codebook pixels.
|
| 550 |
+
codebook_image_mean (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_mean`):
|
| 551 |
+
Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
|
| 552 |
+
codebook_image_std (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_std`):
|
| 553 |
+
Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
|
| 554 |
+
set to `True`.
|
| 555 |
+
return_tensors (`str` or `TensorType`, *optional*):
|
| 556 |
+
The type of tensors to return. Can be one of:
|
| 557 |
+
- Unset: Return a list of `np.ndarray`.
|
| 558 |
+
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
|
| 559 |
+
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
| 560 |
+
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
|
| 561 |
+
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
|
| 562 |
+
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
|
| 563 |
+
The channel dimension format for the output image. Can be one of:
|
| 564 |
+
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
| 565 |
+
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
| 566 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
| 567 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
| 568 |
+
from the input image. Can be one of:
|
| 569 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
| 570 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
| 571 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
| 572 |
+
"""
|
| 573 |
+
do_resize = do_resize if do_resize is not None else self.do_resize
|
| 574 |
+
size = size if size is not None else self.size
|
| 575 |
+
size = get_size_dict(size)
|
| 576 |
+
resample = resample if resample is not None else self.resample
|
| 577 |
+
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
|
| 578 |
+
crop_size = crop_size if crop_size is not None else self.crop_size
|
| 579 |
+
crop_size = get_size_dict(crop_size, param_name="crop_size")
|
| 580 |
+
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
|
| 581 |
+
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
|
| 582 |
+
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
|
| 583 |
+
image_mean = image_mean if image_mean is not None else self.image_mean
|
| 584 |
+
image_std = image_std if image_std is not None else self.image_std
|
| 585 |
+
|
| 586 |
+
return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask
|
| 587 |
+
input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches
|
| 588 |
+
total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches
|
| 589 |
+
mask_group_min_patches = (
|
| 590 |
+
mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches
|
| 591 |
+
)
|
| 592 |
+
mask_group_max_patches = (
|
| 593 |
+
mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches
|
| 594 |
+
)
|
| 595 |
+
mask_group_min_aspect_ratio = (
|
| 596 |
+
mask_group_min_aspect_ratio
|
| 597 |
+
if mask_group_min_aspect_ratio is not None
|
| 598 |
+
else self.mask_group_min_aspect_ratio
|
| 599 |
+
)
|
| 600 |
+
mask_group_max_aspect_ratio = (
|
| 601 |
+
mask_group_max_aspect_ratio
|
| 602 |
+
if mask_group_max_aspect_ratio is not None
|
| 603 |
+
else self.mask_group_max_aspect_ratio
|
| 604 |
+
)
|
| 605 |
+
|
| 606 |
+
return_codebook_pixels = (
|
| 607 |
+
return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels
|
| 608 |
+
)
|
| 609 |
+
codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize
|
| 610 |
+
codebook_size = codebook_size if codebook_size is not None else self.codebook_size
|
| 611 |
+
codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
|
| 612 |
+
codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample
|
| 613 |
+
codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale
|
| 614 |
+
codebook_rescale_factor = (
|
| 615 |
+
codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor
|
| 616 |
+
)
|
| 617 |
+
codebook_do_center_crop = (
|
| 618 |
+
codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop
|
| 619 |
+
)
|
| 620 |
+
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size
|
| 621 |
+
codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
|
| 622 |
+
codebook_do_map_pixels = (
|
| 623 |
+
codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels
|
| 624 |
+
)
|
| 625 |
+
codebook_do_normalize = (
|
| 626 |
+
codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize
|
| 627 |
+
)
|
| 628 |
+
codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean
|
| 629 |
+
codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std
|
| 630 |
+
|
| 631 |
+
images = make_list_of_images(images)
|
| 632 |
+
|
| 633 |
+
if not valid_images(images):
|
| 634 |
+
raise ValueError(
|
| 635 |
+
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
|
| 636 |
+
"torch.Tensor, tf.Tensor or jax.ndarray."
|
| 637 |
+
)
|
| 638 |
+
|
| 639 |
+
processed_images = [
|
| 640 |
+
self._preprocess_image(
|
| 641 |
+
image=img,
|
| 642 |
+
do_resize=do_resize,
|
| 643 |
+
size=size,
|
| 644 |
+
resample=resample,
|
| 645 |
+
do_center_crop=do_center_crop,
|
| 646 |
+
crop_size=crop_size,
|
| 647 |
+
do_rescale=do_rescale,
|
| 648 |
+
rescale_factor=rescale_factor,
|
| 649 |
+
do_normalize=do_normalize,
|
| 650 |
+
image_mean=image_mean,
|
| 651 |
+
image_std=image_std,
|
| 652 |
+
do_map_pixels=False,
|
| 653 |
+
data_format=data_format,
|
| 654 |
+
input_data_format=input_data_format,
|
| 655 |
+
)
|
| 656 |
+
for img in images
|
| 657 |
+
]
|
| 658 |
+
data = {"pixel_values": processed_images}
|
| 659 |
+
|
| 660 |
+
if return_codebook_pixels:
|
| 661 |
+
codebook_images = [
|
| 662 |
+
self._preprocess_image(
|
| 663 |
+
image=img,
|
| 664 |
+
do_resize=codebook_do_resize,
|
| 665 |
+
size=codebook_size,
|
| 666 |
+
resample=codebook_resample,
|
| 667 |
+
do_center_crop=codebook_do_center_crop,
|
| 668 |
+
crop_size=codebook_crop_size,
|
| 669 |
+
do_rescale=codebook_do_rescale,
|
| 670 |
+
rescale_factor=codebook_rescale_factor,
|
| 671 |
+
do_normalize=codebook_do_normalize,
|
| 672 |
+
image_mean=codebook_image_mean,
|
| 673 |
+
image_std=codebook_image_std,
|
| 674 |
+
do_map_pixels=codebook_do_map_pixels,
|
| 675 |
+
data_format=data_format,
|
| 676 |
+
input_data_format=input_data_format,
|
| 677 |
+
)
|
| 678 |
+
for img in images
|
| 679 |
+
]
|
| 680 |
+
data["codebook_pixel_values"] = codebook_images
|
| 681 |
+
|
| 682 |
+
if return_image_mask:
|
| 683 |
+
mask_generator = self.masking_generator(
|
| 684 |
+
input_size_patches=input_size_patches,
|
| 685 |
+
total_mask_patches=total_mask_patches,
|
| 686 |
+
mask_group_min_patches=mask_group_min_patches,
|
| 687 |
+
mask_group_max_patches=mask_group_max_patches,
|
| 688 |
+
mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
|
| 689 |
+
mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
|
| 690 |
+
)
|
| 691 |
+
masks = [mask_generator() for _ in images]
|
| 692 |
+
data["bool_masked_pos"] = masks
|
| 693 |
+
|
| 694 |
+
return BatchFeature(data=data, tensor_type=return_tensors)
|
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/modeling_flava.py
ADDED
|
@@ -0,0 +1,2099 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" PyTorch FLAVA model."""
|
| 16 |
+
|
| 17 |
+
import collections
|
| 18 |
+
import math
|
| 19 |
+
from collections import OrderedDict
|
| 20 |
+
from dataclasses import dataclass
|
| 21 |
+
from typing import Any, Dict, List, Optional, Set, Tuple, Union
|
| 22 |
+
|
| 23 |
+
import torch
|
| 24 |
+
import torch.utils.checkpoint
|
| 25 |
+
from torch import nn
|
| 26 |
+
|
| 27 |
+
from ...activations import ACT2FN
|
| 28 |
+
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
|
| 29 |
+
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
|
| 30 |
+
from ...utils import (
|
| 31 |
+
ModelOutput,
|
| 32 |
+
add_code_sample_docstrings,
|
| 33 |
+
add_start_docstrings,
|
| 34 |
+
add_start_docstrings_to_model_forward,
|
| 35 |
+
logging,
|
| 36 |
+
replace_return_docstrings,
|
| 37 |
+
)
|
| 38 |
+
from .configuration_flava import (
|
| 39 |
+
FlavaConfig,
|
| 40 |
+
FlavaImageCodebookConfig,
|
| 41 |
+
FlavaImageConfig,
|
| 42 |
+
FlavaMultimodalConfig,
|
| 43 |
+
FlavaTextConfig,
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
logger = logging.get_logger(__name__)
|
| 48 |
+
|
| 49 |
+
_CHECKPOINT_FOR_DOC = "facebook/flava-full"
|
| 50 |
+
|
| 51 |
+
# Codebook docstring
|
| 52 |
+
_CHECKPOINT_FOR_CODEBOOK_DOC = "facebook/flava-image-codebook"
|
| 53 |
+
_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC = "FlavaImageConfig"
|
| 54 |
+
_CONFIG_CLASS_FOR_TEXT_MODEL_DOC = "FlavaTextConfig"
|
| 55 |
+
_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC = "FlavaMultimodalConfig"
|
| 56 |
+
_EXPECTED_IMAGE_OUTPUT_SHAPE = [1, 197, 768]
|
| 57 |
+
|
| 58 |
+
FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
| 59 |
+
"facebook/flava-full",
|
| 60 |
+
# See all flava models at https://huggingface.co/models?filter=flava
|
| 61 |
+
]
|
| 62 |
+
FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST = ["facebook/flava-image-codebook"]
|
| 63 |
+
LOGIT_SCALE_CLAMP_MIN = 0
|
| 64 |
+
LOGIT_SCALE_CLAMP_MAX = 4.6052
|
| 65 |
+
|
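The two clamp bounds above cap the learnable contrastive temperature (exp(4.6052) ≈ 100, the same ceiling CLIP uses). A hedged sketch of how such a clamp is typically applied before computing image-text similarity logits; the tensors and the 0.07 initial temperature are illustrative, not taken from this file:

import math
import torch

# Illustrative only: clamp a learnable logit scale to the bounds defined above,
# then use it to scale cosine-similarity logits between normalized embeddings.
logit_scale = torch.nn.Parameter(torch.tensor(math.log(1 / 0.07)))
logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX)  # 0 .. 4.6052

image_embeds = torch.nn.functional.normalize(torch.randn(4, 768), dim=-1)
text_embeds = torch.nn.functional.normalize(torch.randn(4, 768), dim=-1)
logits_per_image = logit_scale.exp() * image_embeds @ text_embeds.t()  # shape (4, 4)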
| 66 |
+
FlavaPossibleConfigs = Union[FlavaTextConfig, FlavaImageConfig, FlavaMultimodalConfig]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@dataclass
|
| 70 |
+
class FlavaModelOutput(ModelOutput):
|
| 71 |
+
"""
|
| 72 |
+
Output from FlavaModel containing embeddings and outputs from individual encoders.
|
| 73 |
+
|
| 74 |
+
Note that `image_embeddings` and `text_embeddings` returned are similar to the pooled output returned from a
|
| 75 |
+
transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
|
| 76 |
+
`text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
|
| 77 |
+
|
| 78 |
+
Args:
|
| 79 |
+
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
|
| 80 |
+
The image embeddings which are basically the pooled output of [`FlavaImageModel`].
|
| 81 |
+
image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
|
| 82 |
+
The output of the [`FlavaImageModel`].
|
| 83 |
+
text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
|
| 84 |
+
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
|
| 85 |
+
text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
|
| 86 |
+
The output of the [`FlavaTextModel`].
|
| 87 |
+
multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
|
| 88 |
+
The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
|
| 89 |
+
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
|
| 90 |
+
The output of the [`FlavaMultimodalModel`].
|
| 91 |
+
"""
|
| 92 |
+
|
| 93 |
+
image_embeddings: Optional[torch.FloatTensor] = None
|
| 94 |
+
image_output: Optional[BaseModelOutputWithPooling] = None
|
| 95 |
+
text_embeddings: Optional[torch.FloatTensor] = None
|
| 96 |
+
text_output: Optional[BaseModelOutputWithPooling] = None
|
| 97 |
+
multimodal_embeddings: Optional[torch.FloatTensor] = None
|
| 98 |
+
multimodal_output: Optional[BaseModelOutputWithPooling] = None
|
| 99 |
+
|
| 100 |
+
def to_tuple(self) -> Tuple[Any]:
|
| 101 |
+
return tuple(
|
| 102 |
+
self[k] if k not in ["text_output", "image_output", "multimodal_output"] else getattr(self, k).to_tuple()
|
| 103 |
+
for k in self.keys()
|
| 104 |
+
)
|
| 105 |
+
|
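As the docstring above notes, for retrieval or contrastive scoring the pooled `image_embeddings`/`text_embeddings` are meant to go through the model's `image_projection`/`text_projection` heads. A hedged usage sketch; the checkpoint name follows `_CHECKPOINT_FOR_DOC`, the image path is a placeholder, and the exact token indexing may differ per checkpoint:

# Hedged usage sketch (not part of this file): project pooled FLAVA embeddings
# before computing image-text similarity, as the docstring above suggests.
from PIL import Image
from transformers import FlavaModel, FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
model = FlavaModel.from_pretrained("facebook/flava-full")

inputs = processor(text=["a photo of a cat"], images=Image.open("cat.jpg"),
                   return_tensors="pt", padding=True)
outputs = model(**inputs)

# Per the documented (batch_size, output_dim) shapes; if a checkpoint returns
# sequence outputs instead, select the [CLS] position first.
image_proj = model.image_projection(outputs.image_embeddings)
text_proj = model.text_projection(outputs.text_embeddings)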
| 106 |
+
|
| 107 |
+
@dataclass
|
| 108 |
+
class FlavaLosses(ModelOutput):
|
| 109 |
+
"""Class representing pretraining losses from FLAVA model
|
| 110 |
+
|
| 111 |
+
Args:
|
| 112 |
+
mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.:
|
| 113 |
+
Masked Image Modeling loss as used in BeIT calculated only for unimodal image data.
|
| 114 |
+
mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.:
|
| 115 |
+
Masked Language Modeling loss as used in BERT calculated only for unimodal text data.
|
| 116 |
+
itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.:
|
| 117 |
+
Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated on
|
| 118 |
+
masked pairs in FLAVA.
|
| 119 |
+
global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.:
|
| 120 |
+
Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text
|
| 121 |
+
data. This is calculated on unmasked images and texts.
|
| 122 |
+
mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.:
|
| 123 |
+
Masked Multimodal Modeling loss's image component calculated on paired image-text data.
|
| 124 |
+
mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.:
|
| 125 |
+
Masked Multimodal Modeling loss's text component calculated on paired image-text data.
|
| 126 |
+
"""
|
| 127 |
+
|
| 128 |
+
mim: Optional[torch.FloatTensor] = None
|
| 129 |
+
mlm: Optional[torch.FloatTensor] = None
|
| 130 |
+
itm: Optional[torch.FloatTensor] = None
|
| 131 |
+
global_contrastive: Optional[torch.FloatTensor] = None
|
| 132 |
+
mmm_image: Optional[torch.FloatTensor] = None
|
| 133 |
+
mmm_text: Optional[torch.FloatTensor] = None
|
| 134 |
+
|
| 135 |
+
def all_none(self) -> bool:
|
| 136 |
+
all_none = True
|
| 137 |
+
for v in self.values():
|
| 138 |
+
if v is not None:
|
| 139 |
+
all_none = False
|
| 140 |
+
break
|
| 141 |
+
return all_none
|
| 142 |
+
|
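A minimal sketch of how a caller might aggregate the optional per-task losses above; the equal weighting is hypothetical (the real weighting is applied inside the pretraining head, not in this class):

# Hypothetical aggregation: sum whichever FLAVA pretraining losses were actually
# computed, skipping the None entries that all_none() checks for.
def total_pretraining_loss(losses: "FlavaLosses"):
    if losses.all_none():
        return None
    return sum(v for v in losses.values() if v is not None)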
| 143 |
+
|
| 144 |
+
@dataclass
|
| 145 |
+
class FlavaForPreTrainingOutput(ModelOutput):
|
| 146 |
+
"""
|
| 147 |
+
Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders.
|
| 148 |
+
|
| 149 |
+
Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
|
| 150 |
+
transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
|
| 151 |
+
`text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
|
| 152 |
+
|
| 153 |
+
Args:
|
| 154 |
+
loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True):
|
| 155 |
+
Total loss calculated for this model.
|
| 156 |
+
loss_info (`FlavaLosses`):
|
| 157 |
+
Detailed info for FLAVA Pretraining losses. Check `FlavaLosses` class description for the information on
|
| 158 |
+
the keys.
|
| 159 |
+
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
|
| 160 |
+
The image embeddings which are basically the pooled output of [`FlavaImageModel`].
|
| 161 |
+
image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
|
| 162 |
+
The output of the [`FlavaImageModel`].
|
| 163 |
+
text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
|
| 164 |
+
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
|
| 165 |
+
text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
|
| 166 |
+
The output of the [`FlavaTextModel`].
|
| 167 |
+
multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
|
| 168 |
+
The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
|
| 169 |
+
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
|
| 170 |
+
The output of the [`FlavaMultimodalModel`].
|
| 171 |
+
|
| 172 |
+
image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
|
| 173 |
+
The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos`
|
| 174 |
+
to create masked images.
|
| 175 |
+
image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
|
| 176 |
+
The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images.
|
| 177 |
+
text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present):
|
| 178 |
+
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
|
| 179 |
+
text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present):
|
| 180 |
+
The output of the [`FlavaTextModel`].
|
| 181 |
+
multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present):
|
| 182 |
+
The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
|
| 183 |
+
multimodal_masked_output (`BaseModelOutputWithPooling`, returned when `input_ids_masked` and `pixel_values` are present):
|
| 184 |
+
The output of the [`FlavaMultimodalModel`].
|
| 185 |
+
|
| 186 |
+
mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)` , *optional*, returned when `pixel_values` are present and `input_ids_masked` are not):
|
| 187 |
+
The logits for MIM unimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened output is
|
| 188 |
+
returned when `bool_masked_pos` has some of the patches masked.
|
| 189 |
+
mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not):
|
| 190 |
+
The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of
|
| 191 |
+
the tokens masked.
|
| 192 |
+
itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
|
| 193 |
+
The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA.
|
| 194 |
+
mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape`(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
|
| 195 |
+
The logits for MMM image multimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened
|
| 196 |
+
output is returned when `bool_masked_pos` has some of the patches masked.
|
| 197 |
+
mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
|
| 198 |
+
The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has
|
| 199 |
+
some of the tokens masked.
|
| 200 |
+
contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
|
| 201 |
+
The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's
|
| 202 |
+
`image_projection` and `text_projection` layers respectively. This represents the image-text similarity
|
| 203 |
+
scores. This is calculated on unmasked images and texts.
|
| 204 |
+
contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
|
| 205 |
+
The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's
|
| 206 |
+
`text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and
|
| 207 |
+
texts.
|
| 208 |
+
"""
|
| 209 |
+
|
| 210 |
+
loss: Optional[torch.FloatTensor] = None
|
| 211 |
+
loss_info: FlavaLosses = None
|
| 212 |
+
image_embeddings: Optional[torch.FloatTensor] = None
|
| 213 |
+
image_output: Optional[BaseModelOutputWithPooling] = None
|
| 214 |
+
text_embeddings: Optional[torch.FloatTensor] = None
|
| 215 |
+
text_output: Optional[BaseModelOutputWithPooling] = None
|
| 216 |
+
multimodal_embeddings: Optional[torch.FloatTensor] = None
|
| 217 |
+
multimodal_output: Optional[BaseModelOutputWithPooling] = None
|
| 218 |
+
image_masked_embeddings: Optional[torch.FloatTensor] = None
|
| 219 |
+
image_masked_output: Optional[BaseModelOutputWithPooling] = None
|
| 220 |
+
text_masked_embeddings: Optional[torch.FloatTensor] = None
|
| 221 |
+
text_masked_output: Optional[BaseModelOutputWithPooling] = None
|
| 222 |
+
multimodal_masked_embeddings: Optional[torch.FloatTensor] = None
|
| 223 |
+
multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None
|
| 224 |
+
mim_logits: Optional[torch.FloatTensor] = None
|
| 225 |
+
mlm_logits: Optional[torch.FloatTensor] = None
|
| 226 |
+
itm_logits: Optional[torch.FloatTensor] = None
|
| 227 |
+
contrastive_logits_per_image: Optional[torch.FloatTensor] = None
|
| 228 |
+
contrastive_logits_per_text: Optional[torch.FloatTensor] = None
|
| 229 |
+
mmm_image_logits: Optional[torch.FloatTensor] = None
|
| 230 |
+
mmm_text_logits: Optional[torch.FloatTensor] = None
|
| 231 |
+
|
| 232 |
+
def to_tuple(self) -> Tuple[Any]:
|
| 233 |
+
transformer_outputs = [
|
| 234 |
+
"text_output",
|
| 235 |
+
"image_output",
|
| 236 |
+
"multimodal_output",
|
| 237 |
+
"text_masked_output",
|
| 238 |
+
"image_masked_output",
|
| 239 |
+
"multimodal_masked_output",
|
| 240 |
+
]
|
| 241 |
+
return tuple(self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys())
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
# Based on timm implementation, which can be found here:
|
| 245 |
+
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py
|
| 246 |
+
class FlavaImageEmbeddings(nn.Module):
|
| 247 |
+
"""
|
| 248 |
+
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
|
| 249 |
+
"""
|
| 250 |
+
|
| 251 |
+
def __init__(self, config: FlavaImageConfig, use_mask_token: bool = False) -> None:
|
| 252 |
+
super().__init__()
|
| 253 |
+
|
| 254 |
+
use_mask_token = use_mask_token or config.mask_token
|
| 255 |
+
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
|
| 256 |
+
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
|
| 257 |
+
self.patch_embeddings = PatchEmbeddings(
|
| 258 |
+
image_size=config.image_size,
|
| 259 |
+
patch_size=config.patch_size,
|
| 260 |
+
num_channels=config.num_channels,
|
| 261 |
+
embed_dim=config.hidden_size,
|
| 262 |
+
)
|
| 263 |
+
num_patches = self.patch_embeddings.num_patches
|
| 264 |
+
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
|
| 265 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 266 |
+
self.config = config
|
| 267 |
+
|
| 268 |
+
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
|
| 269 |
+
"""
|
| 270 |
+
This method interpolates the pre-trained position encodings so that the model can be used on higher
|
| 271 |
+
resolution images.
|
| 272 |
+
|
| 273 |
+
Source:
|
| 274 |
+
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/image_transformer.py#L174
|
| 275 |
+
"""
|
| 276 |
+
|
| 277 |
+
npatch = embeddings.shape[1] - 1
|
| 278 |
+
num_pos = self.position_embeddings.shape[1] - 1
|
| 279 |
+
if npatch == num_pos and height == width:
|
| 280 |
+
return self.position_embeddings
|
| 281 |
+
class_pos_embed = self.position_embeddings[:, 0]
|
| 282 |
+
patch_pos_embed = self.position_embeddings[:, 1:]
|
| 283 |
+
dim = embeddings.shape[-1]
|
| 284 |
+
num_h_patches = height // self.config.patch_size
|
| 285 |
+
num_w_patches = width // self.config.patch_size
|
| 286 |
+
# we add a small number to avoid floating point error in the interpolation
|
| 287 |
+
# see discussion at https://github.com/facebookresearch/dino/issues/8
|
| 288 |
+
num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
|
| 289 |
+
patch_pos_embed = nn.functional.interpolate(
|
| 290 |
+
patch_pos_embed.reshape(1, int(math.sqrt(num_pos)), int(math.sqrt(num_pos)), dim).permute(0, 3, 1, 2),
|
| 291 |
+
scale_factor=(num_h_patches / math.sqrt(num_pos), num_w_patches / math.sqrt(num_pos)),
|
| 292 |
+
mode="bicubic",
|
| 293 |
+
align_corners=False,
|
| 294 |
+
)
|
| 295 |
+
if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
|
| 296 |
+
raise ValueError(
|
| 297 |
+
f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
|
| 298 |
+
f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
|
| 299 |
+
)
|
| 300 |
+
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
|
| 301 |
+
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
|
| 302 |
+
|
| 303 |
+
def forward(
|
| 304 |
+
self,
|
| 305 |
+
pixel_values: torch.Tensor,
|
| 306 |
+
bool_masked_pos: Optional[torch.BoolTensor] = None,
|
| 307 |
+
interpolate_pos_encoding: bool = False,
|
| 308 |
+
) -> torch.Tensor:
|
| 309 |
+
batch_size, num_channels, height, width = pixel_values.shape
|
| 310 |
+
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
|
| 311 |
+
|
| 312 |
+
batch_size, seq_len, _ = embeddings.size()
|
| 313 |
+
if bool_masked_pos is not None:
|
| 314 |
+
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
|
| 315 |
+
# B X H X W = B X HW
|
| 316 |
+
if bool_masked_pos.dim() == 3:
|
| 317 |
+
bool_masked_pos = bool_masked_pos.view(bool_masked_pos.size(0), -1)
|
| 318 |
+
# replace the masked visual tokens by mask_tokens
|
| 319 |
+
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
|
| 320 |
+
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
|
| 321 |
+
|
| 322 |
+
# add the [CLS] token to the embedded patch tokens
|
| 323 |
+
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
|
| 324 |
+
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
|
| 325 |
+
|
| 326 |
+
# add positional encoding to each token
|
| 327 |
+
if interpolate_pos_encoding:
|
| 328 |
+
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
|
| 329 |
+
else:
|
| 330 |
+
embeddings = embeddings + self.position_embeddings
|
| 331 |
+
|
| 332 |
+
embeddings = self.dropout(embeddings)
|
| 333 |
+
|
| 334 |
+
return embeddings
|
| 335 |
+
|
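To make `interpolate_pos_encoding` above concrete, here is a short shape walk-through under assumed ViT-B/16 settings (patch_size=16, hidden_size=768, pretrained at 224 px, run at 384 px); the numbers are illustrative:

# Assumed settings: patch_size=16, hidden_size=768, pretrained resolution 224,
# inference resolution 384 with interpolate_pos_encoding=True.
num_pos = (224 // 16) ** 2   # 196 learned patch positions, a 14 x 14 grid
npatch = (384 // 16) ** 2    # 576 patches at the new resolution, a 24 x 24 grid
# interpolate_pos_encoding() reshapes the 196 position embeddings to
# (1, 768, 14, 14), bicubically resizes them to (1, 768, 24, 24), flattens back
# to (1, 576, 768), and re-attaches the [CLS] position, giving (1, 577, 768).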
| 336 |
+
|
| 337 |
+
# Based on timm implementation, which can be found here:
|
| 338 |
+
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py
|
| 339 |
+
class PatchEmbeddings(nn.Module):
|
| 340 |
+
"""
|
| 341 |
+
Image to Patch Embedding.
|
| 342 |
+
"""
|
| 343 |
+
|
| 344 |
+
def __init__(
|
| 345 |
+
self,
|
| 346 |
+
image_size: int = 224,
|
| 347 |
+
patch_size: Union[int, Tuple[int, int]] = 16,
|
| 348 |
+
num_channels: int = 3,
|
| 349 |
+
embed_dim: int = 768,
|
| 350 |
+
):
|
| 351 |
+
super().__init__()
|
| 352 |
+
if not isinstance(image_size, collections.abc.Iterable):
|
| 353 |
+
image_size = (image_size, image_size)
|
| 354 |
+
if not isinstance(patch_size, collections.abc.Iterable):
|
| 355 |
+
patch_size = (patch_size, patch_size)
|
| 356 |
+
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
|
| 357 |
+
self.image_size = image_size
|
| 358 |
+
self.patch_size = patch_size
|
| 359 |
+
self.num_patches = num_patches
|
| 360 |
+
|
| 361 |
+
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
|
| 362 |
+
|
| 363 |
+
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
|
| 364 |
+
batch_size, num_channels, height, width = pixel_values.shape
|
| 365 |
+
if not interpolate_pos_encoding:
|
| 366 |
+
if height != self.image_size[0] or width != self.image_size[1]:
|
| 367 |
+
raise ValueError(
|
| 368 |
+
f"Input image size ({height}*{width}) doesn't match model"
|
| 369 |
+
f" ({self.image_size[0]}*{self.image_size[1]})."
|
| 370 |
+
)
|
| 371 |
+
x = self.projection(pixel_values).flatten(2).transpose(1, 2)
|
| 372 |
+
return x
|
| 373 |
+
|
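A quick numeric check of the shapes produced by `PatchEmbeddings` with its default arguments (illustrative):

import torch

# Default settings: 224x224 images, 16x16 patches, 768-dim embeddings.
patch_embed = PatchEmbeddings(image_size=224, patch_size=16, num_channels=3, embed_dim=768)
print(patch_embed.num_patches)             # 196 == (224 // 16) ** 2
pixel_values = torch.randn(2, 3, 224, 224)
print(patch_embed(pixel_values).shape)     # torch.Size([2, 196, 768])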
| 374 |
+
|
| 375 |
+
class FlavaTextEmbeddings(nn.Module):
|
| 376 |
+
"""Construct the embeddings from word, position and token_type embeddings."""
|
| 377 |
+
|
| 378 |
+
def __init__(self, config):
|
| 379 |
+
super().__init__()
|
| 380 |
+
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
|
| 381 |
+
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
|
| 382 |
+
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
|
| 383 |
+
|
| 384 |
+
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
|
| 385 |
+
# any TensorFlow checkpoint file
|
| 386 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 387 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 388 |
+
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
|
| 389 |
+
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
|
| 390 |
+
self.register_buffer(
|
| 391 |
+
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
|
| 392 |
+
)
|
| 393 |
+
self.register_buffer(
|
| 394 |
+
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
def forward(
|
| 398 |
+
self,
|
| 399 |
+
input_ids: Optional[torch.Tensor] = None,
|
| 400 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
| 401 |
+
position_ids: Optional[torch.Tensor] = None,
|
| 402 |
+
):
|
| 403 |
+
input_shape = input_ids.size()
|
| 404 |
+
seq_length = input_shape[1]
|
| 405 |
+
|
| 406 |
+
if position_ids is None:
|
| 407 |
+
position_ids = self.position_ids[:, :seq_length]
|
| 408 |
+
|
| 409 |
+
# Setting the token_type_ids to the registered buffer in the constructor where it is all zeros, which usually occurs
|
| 410 |
+
# when it's auto-generated; the registered buffer helps users when tracing the model without passing token_type_ids, and solves
|
| 411 |
+
# issue #5664
|
| 412 |
+
if token_type_ids is None:
|
| 413 |
+
if hasattr(self, "token_type_ids"):
|
| 414 |
+
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
|
| 415 |
+
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
|
| 416 |
+
token_type_ids = buffered_token_type_ids_expanded
|
| 417 |
+
else:
|
| 418 |
+
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
|
| 419 |
+
|
| 420 |
+
inputs_embeds = self.word_embeddings(input_ids)
|
| 421 |
+
token_type_embeddings = self.token_type_embeddings(token_type_ids)
|
| 422 |
+
|
| 423 |
+
embeddings = inputs_embeds + token_type_embeddings
|
| 424 |
+
if self.position_embedding_type == "absolute":
|
| 425 |
+
position_embeddings = self.position_embeddings(position_ids)
|
| 426 |
+
embeddings += position_embeddings
|
| 427 |
+
embeddings = self.LayerNorm(embeddings)
|
| 428 |
+
embeddings = self.dropout(embeddings)
|
| 429 |
+
return embeddings
|
| 430 |
+
|
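A similar shape check for `FlavaTextEmbeddings` under the default `FlavaTextConfig` (BERT-like vocabulary and hidden size; the token ids below are arbitrary):

import torch
from transformers import FlavaTextConfig

text_embed = FlavaTextEmbeddings(FlavaTextConfig())   # BERT-like defaults, hidden 768
input_ids = torch.tensor([[101, 7592, 2088, 102]])    # arbitrary ids within the vocab
print(text_embed(input_ids).shape)                    # torch.Size([1, 4, 768])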
| 431 |
+
|
| 432 |
+
class FlavaSelfAttention(nn.Module):
|
| 433 |
+
def __init__(self, config: FlavaPossibleConfigs) -> None:
|
| 434 |
+
super().__init__()
|
| 435 |
+
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
| 436 |
+
raise ValueError(
|
| 437 |
+
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
|
| 438 |
+
f"heads {config.num_attention_heads}."
|
| 439 |
+
)
|
| 440 |
+
|
| 441 |
+
self.num_attention_heads = config.num_attention_heads
|
| 442 |
+
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
| 443 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
| 444 |
+
|
| 445 |
+
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
| 446 |
+
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
| 447 |
+
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
| 448 |
+
|
| 449 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
| 450 |
+
|
| 451 |
+
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
|
| 452 |
+
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
| 453 |
+
x = x.view(*new_x_shape)
|
| 454 |
+
return x.permute(0, 2, 1, 3)
|
| 455 |
+
|
| 456 |
+
def forward(
|
| 457 |
+
self,
|
| 458 |
+
hidden_states: torch.Tensor,
|
| 459 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 460 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 461 |
+
output_attentions: bool = False,
|
| 462 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
| 463 |
+
mixed_query_layer = self.query(hidden_states)
|
| 464 |
+
|
| 465 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
| 466 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
| 467 |
+
query_layer = self.transpose_for_scores(mixed_query_layer)
|
| 468 |
+
|
| 469 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
| 470 |
+
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
| 471 |
+
|
| 472 |
+
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
| 473 |
+
if attention_mask is not None:
|
| 474 |
+
# Apply the attention mask (precomputed for all layers in the BertModel forward() function)
|
| 475 |
+
attention_scores = attention_scores + attention_mask
|
| 476 |
+
|
| 477 |
+
# Normalize the attention scores to probabilities.
|
| 478 |
+
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
| 481 |
+
|
| 482 |
+
# This is actually dropping out entire tokens to attend to, which might
|
| 483 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
| 484 |
+
attention_probs = self.dropout(attention_probs)
|
| 485 |
+
|
| 486 |
+
# Mask heads if we want to
|
| 487 |
+
if head_mask is not None:
|
| 488 |
+
attention_probs = attention_probs * head_mask
|
| 489 |
+
|
| 490 |
+
context_layer = torch.matmul(attention_probs, value_layer)
|
| 491 |
+
|
| 492 |
+
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
| 493 |
+
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
| 494 |
+
context_layer = context_layer.view(*new_context_layer_shape)
|
| 495 |
+
|
| 496 |
+
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
| 497 |
+
|
| 498 |
+
return outputs
|
| 499 |
+
|
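The reshaping done by `transpose_for_scores` above can be verified with a small trace; the values (768 hidden units, 12 heads, 197 tokens) are the `FlavaImageConfig` defaults and are used here purely for illustration:

import torch
from transformers import FlavaImageConfig

attn = FlavaSelfAttention(FlavaImageConfig())      # hidden_size=768, 12 heads
hidden_states = torch.randn(2, 197, 768)           # 196 patches + [CLS]
context, probs = attn(hidden_states, output_attentions=True)
print(context.shape)   # torch.Size([2, 197, 768])     -- heads merged back
print(probs.shape)     # torch.Size([2, 12, 197, 197]) -- per-head attention maps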
| 500 |
+
|
| 501 |
+
class FlavaSelfOutput(nn.Module):
|
| 502 |
+
"""
|
| 503 |
+
The residual connection is defined in FlavaLayer (same as ViTLayer) instead of here (as is the case with other
|
| 504 |
+
models), due to the layernorm applied before each block.
|
| 505 |
+
"""
|
| 506 |
+
|
| 507 |
+
def __init__(self, config: FlavaPossibleConfigs) -> None:
|
| 508 |
+
super().__init__()
|
| 509 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 510 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 511 |
+
|
| 512 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
| 513 |
+
hidden_states = self.dense(hidden_states)
|
| 514 |
+
hidden_states = self.dropout(hidden_states)
|
| 515 |
+
|
| 516 |
+
return hidden_states
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
class FlavaAttention(nn.Module):
|
| 520 |
+
def __init__(self, config: FlavaPossibleConfigs) -> None:
|
| 521 |
+
super().__init__()
|
| 522 |
+
self.attention = FlavaSelfAttention(config)
|
| 523 |
+
self.output = FlavaSelfOutput(config)
|
| 524 |
+
self.pruned_heads = set()
|
| 525 |
+
|
| 526 |
+
def prune_heads(self, heads: Set[int]) -> None:
|
| 527 |
+
if len(heads) == 0:
|
| 528 |
+
return
|
| 529 |
+
heads, index = find_pruneable_heads_and_indices(
|
| 530 |
+
heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
|
| 531 |
+
)
|
| 532 |
+
|
| 533 |
+
# Prune linear layers
|
| 534 |
+
self.attention.query = prune_linear_layer(self.attention.query, index)
|
| 535 |
+
self.attention.key = prune_linear_layer(self.attention.key, index)
|
| 536 |
+
self.attention.value = prune_linear_layer(self.attention.value, index)
|
| 537 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
| 538 |
+
|
| 539 |
+
# Update hyper params and store pruned heads
|
| 540 |
+
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
|
| 541 |
+
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
|
| 542 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
| 543 |
+
|
| 544 |
+
def forward(
|
| 545 |
+
self,
|
| 546 |
+
hidden_states: torch.Tensor,
|
| 547 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 548 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 549 |
+
output_attentions: bool = False,
|
| 550 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
| 551 |
+
self_outputs = self.attention(
|
| 552 |
+
hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions
|
| 553 |
+
)
|
| 554 |
+
|
| 555 |
+
attention_output = self.output(self_outputs[0], hidden_states)
|
| 556 |
+
|
| 557 |
+
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
| 558 |
+
return outputs
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
class FlavaIntermediate(nn.Module):
|
| 562 |
+
def __init__(self, config: FlavaPossibleConfigs) -> None:
|
| 563 |
+
super().__init__()
|
| 564 |
+
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
| 565 |
+
if isinstance(config.hidden_act, str):
|
| 566 |
+
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
| 567 |
+
else:
|
| 568 |
+
self.intermediate_act_fn = config.hidden_act
|
| 569 |
+
|
| 570 |
+
# Copied from transformers.models.vit.modeling_vit.ViTIntermediate.forward
|
| 571 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 572 |
+
hidden_states = self.dense(hidden_states)
|
| 573 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
| 574 |
+
|
| 575 |
+
return hidden_states
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
class FlavaOutput(nn.Module):
|
| 579 |
+
def __init__(self, config: FlavaPossibleConfigs) -> None:
|
| 580 |
+
super().__init__()
|
| 581 |
+
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
| 582 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 583 |
+
|
| 584 |
+
# Copied from transformers.models.vit.modeling_vit.ViTOutput.forward
|
| 585 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
| 586 |
+
hidden_states = self.dense(hidden_states)
|
| 587 |
+
hidden_states = self.dropout(hidden_states)
|
| 588 |
+
|
| 589 |
+
hidden_states = hidden_states + input_tensor
|
| 590 |
+
|
| 591 |
+
return hidden_states
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
class FlavaLayer(nn.Module):
|
| 595 |
+
"""This corresponds to the Block class in the timm implementation."""
|
| 596 |
+
|
| 597 |
+
def __init__(self, config: FlavaPossibleConfigs) -> None:
|
| 598 |
+
super().__init__()
|
| 599 |
+
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
| 600 |
+
self.seq_len_dim = 1
|
| 601 |
+
self.attention = FlavaAttention(config)
|
| 602 |
+
self.intermediate = FlavaIntermediate(config)
|
| 603 |
+
self.output = FlavaOutput(config)
|
| 604 |
+
|
| 605 |
+
# TODO: Check fp32 layer norm possiblity
|
| 606 |
+
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 607 |
+
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 608 |
+
|
| 609 |
+
def forward(
|
| 610 |
+
self,
|
| 611 |
+
hidden_states: torch.Tensor,
|
| 612 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 613 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 614 |
+
output_attentions: bool = False,
|
| 615 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
| 616 |
+
self_attention_outputs = self.attention(
|
| 617 |
+
self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention
|
| 618 |
+
attention_mask=attention_mask,
|
| 619 |
+
head_mask=head_mask,
|
| 620 |
+
output_attentions=output_attentions,
|
| 621 |
+
)
|
| 622 |
+
attention_output = self_attention_outputs[0]
|
| 623 |
+
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
|
| 624 |
+
|
| 625 |
+
# first residual connection
|
| 626 |
+
hidden_states = attention_output + hidden_states
|
| 627 |
+
|
| 628 |
+
# in ViT, layernorm is also applied after self-attention
|
| 629 |
+
layer_output = self.layernorm_after(hidden_states)
|
| 630 |
+
layer_output = self.intermediate(layer_output)
|
| 631 |
+
|
| 632 |
+
# second residual connection is done here
|
| 633 |
+
layer_output = self.output(layer_output, hidden_states)
|
| 634 |
+
|
| 635 |
+
outputs = (layer_output,) + outputs
|
| 636 |
+
|
| 637 |
+
return outputs
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
class FlavaEncoder(nn.Module):
|
| 641 |
+
def __init__(self, config: FlavaConfig) -> None:
|
| 642 |
+
super().__init__()
|
| 643 |
+
self.config = config
|
| 644 |
+
self.layer = nn.ModuleList([FlavaLayer(config) for _ in range(config.num_hidden_layers)])
|
| 645 |
+
self.gradient_checkpointing = False
|
| 646 |
+
|
| 647 |
+
def forward(
|
| 648 |
+
self,
|
| 649 |
+
hidden_states: torch.Tensor,
|
| 650 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 651 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 652 |
+
output_attentions: bool = False,
|
| 653 |
+
output_hidden_states: bool = False,
|
| 654 |
+
return_dict: bool = True,
|
| 655 |
+
) -> Union[tuple, BaseModelOutput]:
|
| 656 |
+
all_hidden_states = () if output_hidden_states else None
|
| 657 |
+
all_self_attentions = () if output_attentions else None
|
| 658 |
+
|
| 659 |
+
for i, layer_module in enumerate(self.layer):
|
| 660 |
+
if output_hidden_states:
|
| 661 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 662 |
+
|
| 663 |
+
layer_head_mask = head_mask[i] if head_mask is not None else None
|
| 664 |
+
|
| 665 |
+
if self.gradient_checkpointing and self.training:
|
| 666 |
+
|
| 667 |
+
def create_custom_forward(module):
|
| 668 |
+
def custom_forward(*inputs):
|
| 669 |
+
return module(*inputs, output_attentions)
|
| 670 |
+
|
| 671 |
+
return custom_forward
|
| 672 |
+
|
| 673 |
+
layer_outputs = torch.utils.checkpoint.checkpoint(
|
| 674 |
+
create_custom_forward(layer_module),
|
| 675 |
+
hidden_states,
|
| 676 |
+
attention_mask,
|
| 677 |
+
layer_head_mask,
|
| 678 |
+
)
|
| 679 |
+
else:
|
| 680 |
+
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
|
| 681 |
+
|
| 682 |
+
hidden_states = layer_outputs[0]
|
| 683 |
+
|
| 684 |
+
if output_attentions:
|
| 685 |
+
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
| 686 |
+
|
| 687 |
+
if output_hidden_states:
|
| 688 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 689 |
+
|
| 690 |
+
if not return_dict:
|
| 691 |
+
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
|
| 692 |
+
return BaseModelOutput(
|
| 693 |
+
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
|
| 694 |
+
)
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
class FlavaPooler(nn.Module):
|
| 698 |
+
def __init__(self, config: FlavaPossibleConfigs):
|
| 699 |
+
super().__init__()
|
| 700 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 701 |
+
self.activation = nn.Tanh()
|
| 702 |
+
|
| 703 |
+
def forward(self, hidden_states: torch.Tensor):
|
| 704 |
+
# We "pool" the model by simply taking the hidden state corresponding
|
| 705 |
+
# to the first token.
|
| 706 |
+
first_token_tensor = hidden_states[:, 0]
|
| 707 |
+
pooled_output = self.dense(first_token_tensor)
|
| 708 |
+
pooled_output = self.activation(pooled_output)
|
| 709 |
+
return pooled_output
|
| 710 |
+
|
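The pooler above is plain first-token ([CLS]) pooling followed by a dense layer and tanh; a tiny illustrative check (config chosen for illustration):

import torch
from transformers import FlavaImageConfig

pooler = FlavaPooler(FlavaImageConfig())
hidden_states = torch.randn(2, 197, 768)
pooled = pooler(hidden_states)        # tanh(dense(hidden_states[:, 0]))
print(pooled.shape)                   # torch.Size([2, 768])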
| 711 |
+
|
| 712 |
+
FLAVA_START_DOCSTRING = r"""
|
| 713 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
|
| 714 |
+
as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
|
| 715 |
+
behavior.
|
| 716 |
+
|
| 717 |
+
Parameters:
|
| 718 |
+
config ([`{config}`]): Model configuration class with all the parameters of the model.
|
| 719 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 720 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 721 |
+
"""
|
| 722 |
+
|
| 723 |
+
FLAVA_INPUTS_DOCSTRING_COMMON = r"""
|
| 724 |
+
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
|
| 725 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 726 |
+
- 1 for tokens that are **not masked**,
|
| 727 |
+
- 0 for tokens that are **masked**.
|
| 728 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 729 |
+
|
| 730 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 731 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 732 |
+
|
| 733 |
+
- 1 indicates the head is **not masked**,
|
| 734 |
+
- 0 indicates the head is **masked**.
|
| 735 |
+
|
| 736 |
+
output_attentions (`bool`, *optional*):
|
| 737 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 738 |
+
tensors for more detail.
|
| 739 |
+
output_hidden_states (`bool`, *optional*):
|
| 740 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 741 |
+
more detail.
|
| 742 |
+
|
| 743 |
+
return_dict (`bool`, *optional*):
|
| 744 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 745 |
+
"""
|
| 746 |
+
|
| 747 |
+
FLAVA_IMAGE_INPUTS_DOCSTRING_BASE = r"""
|
| 748 |
+
Args:
|
| 749 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 750 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
| 751 |
+
[`FlavaImageProcessor.__call__`] for details.
|
| 752 |
+
|
| 753 |
+
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
|
| 754 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
|
| 755 |
+
|
| 756 |
+
interpolate_pos_encoding (`bool`, *optional*):
|
| 757 |
+
Whether to interpolate the pre-trained position encodings.
|
| 758 |
+
"""
|
| 759 |
+
|
| 760 |
+
FLAVA_IMAGE_INPUTS_DOCSTRING = FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON
|
| 761 |
+
|
| 762 |
+
FLAVA_TEXT_INPUTS_DOCSTRING_BASE = r"""
|
| 763 |
+
Args:
|
| 764 |
+
input_ids (`torch.LongTensor` of shape `({0})`):
|
| 765 |
+
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
|
| 766 |
+
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
|
| 767 |
+
IDs?](../glossary#input-ids)
|
| 768 |
+
|
| 769 |
+
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
|
| 770 |
+
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
|
| 771 |
+
1]`:
|
| 772 |
+
- 0 corresponds to a *sentence A* token,
|
| 773 |
+
- 1 corresponds to a *sentence B* token.
|
| 774 |
+
[What are token type IDs?](../glossary#token-type-ids)
|
| 775 |
+
"""
|
| 776 |
+
|
| 777 |
+
FLAVA_TEXT_INPUTS_DOCSTRING = FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON
|
| 778 |
+
|
| 779 |
+
FLAVA_MULTIMODAL_INPUTS_DOCSTRING = (
|
| 780 |
+
r"""
|
| 781 |
+
Args:
|
| 782 |
+
hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`):
|
| 783 |
+
The concatenated hidden states of unimodal encoders.
|
| 784 |
+
"""
|
| 785 |
+
+ FLAVA_INPUTS_DOCSTRING_COMMON
|
| 786 |
+
)
|
| 787 |
+
|
| 788 |
+
FLAVA_MODEL_INPUTS_DOCSTRING_BASE = r"""
|
| 789 |
+
Args:
|
| 790 |
+
skip_multimodal_encoder (*bool*, *optional*):
|
| 791 |
+
Skip any calculations for multimodal encoder. Useful if multimodal encoding is not going to be used.
|
| 792 |
+
"""
|
| 793 |
+
|
| 794 |
+
FLAVA_MODEL_INPUTS_DOCSTRING = (
|
| 795 |
+
FLAVA_IMAGE_INPUTS_DOCSTRING_BASE
|
| 796 |
+
+ FLAVA_TEXT_INPUTS_DOCSTRING_BASE
|
| 797 |
+
+ FLAVA_INPUTS_DOCSTRING_COMMON
|
| 798 |
+
+ FLAVA_MODEL_INPUTS_DOCSTRING_BASE
|
| 799 |
+
)
|
| 800 |
+
|
| 801 |
+
|
| 802 |
+
FLAVA_PRETRAINING_INPUTS_DOCSTRING = (
|
| 803 |
+
r"""
|
| 804 |
+
Args:
|
| 805 |
+
input_ids_masked (`torch.LongTensor` of shape `({0})`):
|
| 806 |
+
Indices of input sequence tokens in the vocabulary. These are the masked versions of the original sequence,
|
| 807 |
+
to be used with MLM. Indices can be obtained using [`AutoTokenizer`] along with
|
| 808 |
+
[`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and
|
| 809 |
+
[`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
|
| 810 |
+
|
| 811 |
+
"""
|
| 812 |
+
+ FLAVA_TEXT_INPUTS_DOCSTRING_BASE
|
| 813 |
+
+ FLAVA_IMAGE_INPUTS_DOCSTRING_BASE
|
| 814 |
+
+ r"""
|
| 815 |
+
image_attention_mask (`torch.FloatTensor` of shape `({1})`, *optional*):
|
| 816 |
+
Mask to avoid performing attention on padding token indices specifically for images. Mask values selected
|
| 817 |
+
in `[0, 1]`:
|
| 818 |
+
- 1 for tokens that are **not masked**,
|
| 819 |
+
- 0 for tokens that are **masked**.
|
| 820 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 821 |
+
|
| 822 |
+
skip_unmasked_multimodal_encoder (*bool*, *optional*):
|
| 823 |
+
Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked
|
| 824 |
+
multimodal embeddings or outputs as of now.
|
| 825 |
+
|
| 826 |
+
mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
|
| 827 |
+
Labels for computing the masked language and multimodal masked modeling loss.
|
| 828 |
+
Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). Tokens with
|
| 829 |
+
indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0,
|
| 830 |
+
..., text_config.vocab_size - 1]`.
|
| 831 |
+
|
| 832 |
+
mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*):
|
| 833 |
+
Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ...,
|
| 834 |
+
image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only
|
| 835 |
+
computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are
|
| 836 |
+
generated automatically using the image codebook assigned to the model. By default, it uses
|
| 837 |
+
[`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels.
|
| 838 |
+
|
| 839 |
+
itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
|
| 840 |
+
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
|
| 841 |
+
The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well.
|
| 842 |
+
|
| 843 |
+
return_loss (`bool`, *optional*, defaults to `None`):
|
| 844 |
+
Whether to return calculated loss or not.
|
| 845 |
+
"""
|
| 846 |
+
+ FLAVA_INPUTS_DOCSTRING_COMMON
|
| 847 |
+
)
|
| 848 |
+
|
| 849 |
+
FLAVA_PRETRAINING_START_DOCSTRING_EXTRA = r"""
|
| 850 |
+
Parameters:
|
| 851 |
+
image_codebook ([`nn.Module`]): If passed, the image codebook will be set to this. Otherwise, it will
|
| 852 |
+
be initialized using the `image_codebook_config` defined in the config (the codebook is accepted as the first parameter).
|
| 853 |
+
"""
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
class FlavaPreTrainedModel(PreTrainedModel):
|
| 857 |
+
"""
|
| 858 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 859 |
+
models.
|
| 860 |
+
"""
|
| 861 |
+
|
| 862 |
+
config_class = FlavaConfig
|
| 863 |
+
base_model_prefix = "flava"
|
| 864 |
+
supports_gradient_checkpointing = True
|
| 865 |
+
|
| 866 |
+
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
|
| 867 |
+
"""Initialize the weights"""
|
| 868 |
+
if isinstance(module, (nn.Linear, nn.Conv2d)):
|
| 869 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
| 870 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
| 871 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
| 872 |
+
if module.bias is not None:
|
| 873 |
+
module.bias.data.zero_()
|
| 874 |
+
elif isinstance(module, nn.Embedding):
|
| 875 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
| 876 |
+
if module.padding_idx is not None:
|
| 877 |
+
module.weight.data[module.padding_idx].zero_()
|
| 878 |
+
elif isinstance(module, nn.LayerNorm):
|
| 879 |
+
module.bias.data.zero_()
|
| 880 |
+
module.weight.data.fill_(1.0)
|
| 881 |
+
|
| 882 |
+
def _set_gradient_checkpointing(self, module: FlavaEncoder, value: bool = False) -> None:
|
| 883 |
+
if isinstance(module, FlavaEncoder):
|
| 884 |
+
module.gradient_checkpointing = value
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
@add_start_docstrings(
|
| 888 |
+
"The bare FLAVA Image Model transformer outputting raw hidden-states without any specific head on top.",
|
| 889 |
+
FLAVA_START_DOCSTRING.format(config="FlavaImageConfig"),
|
| 890 |
+
)
|
| 891 |
+
class FlavaImageModel(FlavaPreTrainedModel):
|
| 892 |
+
config_class = FlavaImageConfig
|
| 893 |
+
# This override allows us to load FlavaImageModel from FlavaModel/FlavaForPreTraining checkpoints.
|
| 894 |
+
base_model_prefix = "flava.image_model"
|
| 895 |
+
main_input_name = "pixel_values"
|
| 896 |
+
|
| 897 |
+
def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool = True):
|
| 898 |
+
super().__init__(config)
|
| 899 |
+
|
| 900 |
+
self.config = config
|
| 901 |
+
|
| 902 |
+
self.embeddings = FlavaImageEmbeddings(config)
|
| 903 |
+
self.encoder = FlavaEncoder(config)
|
| 904 |
+
|
| 905 |
+
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 906 |
+
self.pooler = FlavaPooler(config) if add_pooling_layer else None
|
| 907 |
+
|
| 908 |
+
self.post_init()
|
| 909 |
+
|
| 910 |
+
def get_input_embeddings(self) -> nn.Module:
|
| 911 |
+
return self.embeddings.patch_embeddings
|
| 912 |
+
|
| 913 |
+
def set_input_embeddings(self, value: nn.Module):
|
| 914 |
+
self.embeddings.patch_embeddings = value
|
| 915 |
+
|
| 916 |
+
def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
|
| 917 |
+
"""
|
| 918 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
| 919 |
+
class PreTrainedModel
|
| 920 |
+
"""
|
| 921 |
+
for layer, heads in heads_to_prune.items():
|
| 922 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
| 923 |
+
|
| 924 |
+
@add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
|
| 925 |
+
@add_code_sample_docstrings(
|
| 926 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 927 |
+
output_type=BaseModelOutputWithPooling,
|
| 928 |
+
config_class=_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC,
|
| 929 |
+
modality="vision",
|
| 930 |
+
expected_output=_EXPECTED_IMAGE_OUTPUT_SHAPE,
|
| 931 |
+
)
|
| 932 |
+
def forward(
|
| 933 |
+
self,
|
| 934 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 935 |
+
bool_masked_pos: Optional[torch.BoolTensor] = None,
|
| 936 |
+
interpolate_pos_encoding: Optional[bool] = None,
|
| 937 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 938 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 939 |
+
output_attentions: Optional[bool] = None,
|
| 940 |
+
output_hidden_states: Optional[bool] = None,
|
| 941 |
+
return_dict: Optional[bool] = None,
|
| 942 |
+
) -> Union[tuple, BaseModelOutputWithPooling]:
|
| 943 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 944 |
+
output_hidden_states = (
|
| 945 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 946 |
+
)
|
| 947 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 948 |
+
|
| 949 |
+
if pixel_values is None:
|
| 950 |
+
raise ValueError("You have to specify pixel_values")
|
| 951 |
+
|
| 952 |
+
# Prepare head mask if needed
|
| 953 |
+
# 1.0 in head_mask indicates we keep the head
|
| 954 |
+
# attention_probs has shape bsz x n_heads x N x N
|
| 955 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
| 956 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
| 957 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
| 958 |
+
|
| 959 |
+
embedding_output = self.embeddings(
|
| 960 |
+
pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
|
| 961 |
+
)
|
| 962 |
+
|
| 963 |
+
encoder_outputs = self.encoder(
|
| 964 |
+
embedding_output,
|
| 965 |
+
attention_mask=attention_mask,
|
| 966 |
+
head_mask=head_mask,
|
| 967 |
+
output_attentions=output_attentions,
|
| 968 |
+
output_hidden_states=output_hidden_states,
|
| 969 |
+
return_dict=return_dict,
|
| 970 |
+
)
|
| 971 |
+
sequence_output = encoder_outputs[0]
|
| 972 |
+
sequence_output = self.layernorm(sequence_output)
|
| 973 |
+
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
| 974 |
+
|
| 975 |
+
if not return_dict:
|
| 976 |
+
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
| 977 |
+
|
| 978 |
+
return BaseModelOutputWithPooling(
|
| 979 |
+
last_hidden_state=sequence_output,
|
| 980 |
+
pooler_output=pooled_output,
|
| 981 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 982 |
+
attentions=encoder_outputs.attentions,
|
| 983 |
+
)
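# A minimal usage sketch for the image tower on its own. Loading from the full FLAVA checkpoint is assumed to
# work because of the `base_model_prefix = "flava.image_model"` override above; the checkpoint name comes from
# the docstring examples in this file:
#
#     from PIL import Image
#     import requests
#     from transformers import AutoImageProcessor, FlavaImageModel
#
#     model = FlavaImageModel.from_pretrained("facebook/flava-full")
#     processor = AutoImageProcessor.from_pretrained("facebook/flava-full")
#
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     patch_states = outputs.last_hidden_state  # (batch, 1 + image_num_patches, hidden_size), CLS token first
#     pooled = outputs.pooler_output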
|
| 984 |
+
|
| 985 |
+
|
| 986 |
+
@add_start_docstrings(
|
| 987 |
+
"The bare FLAVA Text Model transformer outputting raw hidden-states without any specific head on top.",
|
| 988 |
+
FLAVA_START_DOCSTRING.format(config="FlavaTextConfig"),
|
| 989 |
+
)
|
| 990 |
+
class FlavaTextModel(FlavaPreTrainedModel):
|
| 991 |
+
config_class = FlavaTextConfig
|
| 992 |
+
# This override allows us to load FlavaTextModel from FlavaModel/FlavaForPreTraining checkpoints.
|
| 993 |
+
base_model_prefix = "flava.text_model"
|
| 994 |
+
|
| 995 |
+
def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool = True):
|
| 996 |
+
super().__init__(config)
|
| 997 |
+
self.config = config
|
| 998 |
+
|
| 999 |
+
self.embeddings = FlavaTextEmbeddings(config)
|
| 1000 |
+
self.encoder = FlavaEncoder(config)
|
| 1001 |
+
|
| 1002 |
+
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 1003 |
+
self.pooler = FlavaPooler(config) if add_pooling_layer else None
|
| 1004 |
+
|
| 1005 |
+
self.post_init()
|
| 1006 |
+
|
| 1007 |
+
def get_input_embeddings(self) -> nn.Module:
|
| 1008 |
+
return self.embeddings.word_embeddings
|
| 1009 |
+
|
| 1010 |
+
def set_input_embeddings(self, value: nn.Module):
|
| 1011 |
+
self.embeddings.word_embeddings = value
|
| 1012 |
+
|
| 1013 |
+
def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
|
| 1014 |
+
"""
|
| 1015 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
| 1016 |
+
class PreTrainedModel
|
| 1017 |
+
"""
|
| 1018 |
+
for layer, heads in heads_to_prune.items():
|
| 1019 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
| 1020 |
+
|
| 1021 |
+
@add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
|
| 1022 |
+
@add_code_sample_docstrings(
|
| 1023 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1024 |
+
output_type=BaseModelOutputWithPooling,
|
| 1025 |
+
config_class=_CONFIG_CLASS_FOR_TEXT_MODEL_DOC,
|
| 1026 |
+
)
|
| 1027 |
+
def forward(
|
| 1028 |
+
self,
|
| 1029 |
+
input_ids: Optional[torch.Tensor] = None,
|
| 1030 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1031 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
| 1032 |
+
position_ids: Optional[torch.Tensor] = None,
|
| 1033 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 1034 |
+
output_attentions: Optional[bool] = None,
|
| 1035 |
+
output_hidden_states: Optional[bool] = None,
|
| 1036 |
+
return_dict: Optional[bool] = None,
|
| 1037 |
+
) -> Union[tuple, BaseModelOutputWithPooling]:
|
| 1038 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1039 |
+
output_hidden_states = (
|
| 1040 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1041 |
+
)
|
| 1042 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1043 |
+
|
| 1044 |
+
if input_ids is None:
|
| 1045 |
+
raise ValueError("You have to specify input_ids")
|
| 1046 |
+
|
| 1047 |
+
input_shape = input_ids.size()
|
| 1048 |
+
|
| 1049 |
+
if attention_mask is None:
|
| 1050 |
+
attention_mask = torch.ones(input_shape, device=input_ids.device)
|
| 1051 |
+
|
| 1052 |
+
# Prepare head mask if needed
|
| 1053 |
+
# 1.0 in head_mask indicates we keep the head
|
| 1054 |
+
# attention_probs has shape bsz x n_heads x N x N
|
| 1055 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
| 1056 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
| 1057 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
| 1058 |
+
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
|
| 1059 |
+
attention_mask, input_shape, input_ids.device
|
| 1060 |
+
)
|
| 1061 |
+
|
| 1062 |
+
embedding_output = self.embeddings(
|
| 1063 |
+
input_ids=input_ids,
|
| 1064 |
+
token_type_ids=token_type_ids,
|
| 1065 |
+
position_ids=position_ids,
|
| 1066 |
+
)
|
| 1067 |
+
|
| 1068 |
+
encoder_outputs = self.encoder(
|
| 1069 |
+
embedding_output,
|
| 1070 |
+
attention_mask=extended_attention_mask,
|
| 1071 |
+
head_mask=head_mask,
|
| 1072 |
+
output_attentions=output_attentions,
|
| 1073 |
+
output_hidden_states=output_hidden_states,
|
| 1074 |
+
return_dict=return_dict,
|
| 1075 |
+
)
|
| 1076 |
+
sequence_output = encoder_outputs[0]
|
| 1077 |
+
sequence_output = self.layernorm(sequence_output)
|
| 1078 |
+
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
| 1079 |
+
|
| 1080 |
+
if not return_dict:
|
| 1081 |
+
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
| 1082 |
+
|
| 1083 |
+
return BaseModelOutputWithPooling(
|
| 1084 |
+
last_hidden_state=sequence_output,
|
| 1085 |
+
pooler_output=pooled_output,
|
| 1086 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 1087 |
+
attentions=encoder_outputs.attentions,
|
| 1088 |
+
)
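# A minimal usage sketch for the text tower on its own (checkpoint name taken from the docstring examples in
# this file; the tokenizer class is whatever the checkpoint ships, loaded via AutoTokenizer):
#
#     from transformers import AutoTokenizer, FlavaTextModel
#
#     model = FlavaTextModel.from_pretrained("facebook/flava-full")
#     tokenizer = AutoTokenizer.from_pretrained("facebook/flava-full")
#
#     inputs = tokenizer(["a photo of a cat"], return_tensors="pt")
#     outputs = model(**inputs)
#     token_states = outputs.last_hidden_state  # (batch, text_seq_len, hidden_size)
#     pooled = outputs.pooler_output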
|
| 1089 |
+
|
| 1090 |
+
|
| 1091 |
+
@add_start_docstrings(
|
| 1092 |
+
"The bare FLAVA Multimodal Model transformer outputting raw hidden-states without any specific head on top.",
|
| 1093 |
+
FLAVA_START_DOCSTRING.format(config="FlavaMultimodalConfig"),
|
| 1094 |
+
)
|
| 1095 |
+
class FlavaMultimodalModel(FlavaPreTrainedModel):
|
| 1096 |
+
config_class = FlavaMultimodalConfig
|
| 1097 |
+
# This override allows us to load FlavaMultimodalModel from FlavaModel/FlavaForPreTraining checkpoints.
|
| 1098 |
+
base_model_prefix = "flava.multimodal_model"
|
| 1099 |
+
main_input_name = "hidden_states"
|
| 1100 |
+
|
| 1101 |
+
def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True):
|
| 1102 |
+
super().__init__(config)
|
| 1103 |
+
self.config = config
|
| 1104 |
+
self.use_cls_token = self.config.use_cls_token
|
| 1105 |
+
if self.use_cls_token:
|
| 1106 |
+
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
|
| 1107 |
+
|
| 1108 |
+
self.encoder = FlavaEncoder(config)
|
| 1109 |
+
|
| 1110 |
+
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 1111 |
+
self.pooler = FlavaPooler(config) if add_pooling_layer else None
|
| 1112 |
+
|
| 1113 |
+
self.post_init()
|
| 1114 |
+
|
| 1115 |
+
def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
|
| 1116 |
+
"""
|
| 1117 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
| 1118 |
+
class PreTrainedModel
|
| 1119 |
+
"""
|
| 1120 |
+
for layer, heads in heads_to_prune.items():
|
| 1121 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
| 1122 |
+
|
| 1123 |
+
@add_start_docstrings_to_model_forward(
|
| 1124 |
+
FLAVA_MULTIMODAL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
|
| 1125 |
+
)
|
| 1126 |
+
@add_code_sample_docstrings(
|
| 1127 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1128 |
+
output_type=BaseModelOutputWithPooling,
|
| 1129 |
+
config_class=_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC,
|
| 1130 |
+
)
|
| 1131 |
+
def forward(
|
| 1132 |
+
self,
|
| 1133 |
+
hidden_states: torch.Tensor,
|
| 1134 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1135 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 1136 |
+
output_attentions: Optional[bool] = None,
|
| 1137 |
+
output_hidden_states: Optional[bool] = None,
|
| 1138 |
+
return_dict: Optional[bool] = None,
|
| 1139 |
+
) -> Union[tuple, BaseModelOutputWithPooling]:
|
| 1140 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1141 |
+
output_hidden_states = (
|
| 1142 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1143 |
+
)
|
| 1144 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1145 |
+
|
| 1146 |
+
batch_size, seq_length, _ = hidden_states.size()
|
| 1147 |
+
|
| 1148 |
+
if self.use_cls_token:
|
| 1149 |
+
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
|
| 1150 |
+
hidden_states = torch.cat((cls_tokens, hidden_states), dim=1)
|
| 1151 |
+
seq_length += 1
|
| 1152 |
+
|
| 1153 |
+
if attention_mask is None:
|
| 1154 |
+
attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device)
|
| 1155 |
+
|
| 1156 |
+
# Prepare head mask if needed
|
| 1157 |
+
# 1.0 in head_mask indicates we keep the head
|
| 1158 |
+
# attention_probs has shape bsz x n_heads x N x N
|
| 1159 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
| 1160 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
| 1161 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
| 1162 |
+
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
|
| 1163 |
+
attention_mask, (batch_size, seq_length), hidden_states.device
|
| 1164 |
+
)
|
| 1165 |
+
|
| 1166 |
+
encoder_outputs = self.encoder(
|
| 1167 |
+
hidden_states,
|
| 1168 |
+
attention_mask=extended_attention_mask,
|
| 1169 |
+
head_mask=head_mask,
|
| 1170 |
+
output_attentions=output_attentions,
|
| 1171 |
+
output_hidden_states=output_hidden_states,
|
| 1172 |
+
return_dict=return_dict,
|
| 1173 |
+
)
|
| 1174 |
+
sequence_output = encoder_outputs[0]
|
| 1175 |
+
sequence_output = self.layernorm(sequence_output)
|
| 1176 |
+
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
| 1177 |
+
|
| 1178 |
+
if not return_dict:
|
| 1179 |
+
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
| 1180 |
+
|
| 1181 |
+
return BaseModelOutputWithPooling(
|
| 1182 |
+
last_hidden_state=sequence_output,
|
| 1183 |
+
pooler_output=pooled_output,
|
| 1184 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 1185 |
+
attentions=encoder_outputs.attentions,
|
| 1186 |
+
)
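# This module is normally driven by `FlavaModel.forward` below: the unimodal hidden states are projected to the
# multimodal width and concatenated along the sequence dimension before being passed in. A hedged sketch of that
# wiring (variable names are illustrative; the real code uses the last entry of each tower's hidden states):
#
#     image_mm = flava.image_to_mm_projection(image_hidden_states)  # (batch, image_seq_len, mm_hidden_size)
#     text_mm = flava.text_to_mm_projection(text_hidden_states)     # (batch, text_seq_len, mm_hidden_size)
#     multimodal_input = torch.cat([image_mm, text_mm], dim=1)
#     multimodal_outputs = flava.multimodal_model(multimodal_input)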
|
| 1187 |
+
|
| 1188 |
+
|
| 1189 |
+
@add_start_docstrings(
|
| 1190 |
+
"The bare FLAVA Model transformer outputting raw hidden-states without any specific head on top.",
|
| 1191 |
+
FLAVA_START_DOCSTRING.format(config="FlavaConfig"),
|
| 1192 |
+
)
|
| 1193 |
+
class FlavaModel(FlavaPreTrainedModel):
|
| 1194 |
+
config_class = FlavaConfig
|
| 1195 |
+
|
| 1196 |
+
def __init__(self, config: FlavaConfig):
|
| 1197 |
+
super().__init__(config)
|
| 1198 |
+
|
| 1199 |
+
if not isinstance(config.text_config, FlavaTextConfig):
|
| 1200 |
+
raise ValueError(
|
| 1201 |
+
"config.text_config is expected to be of type FlavaTextConfig but is of type"
|
| 1202 |
+
f" {type(config.text_config)}."
|
| 1203 |
+
)
|
| 1204 |
+
|
| 1205 |
+
if not isinstance(config.image_config, FlavaImageConfig):
|
| 1206 |
+
raise ValueError(
|
| 1207 |
+
"config.image_config is expected to be of type FlavaImageConfig but is of type"
|
| 1208 |
+
f" {type(config.image_config)}."
|
| 1209 |
+
)
|
| 1210 |
+
|
| 1211 |
+
if not isinstance(config.multimodal_config, FlavaMultimodalConfig):
|
| 1212 |
+
raise ValueError(
|
| 1213 |
+
"config.multimodal_config is expected to be of type FlavaMultimodalConfig but "
|
| 1214 |
+
+ f"is of type {type(config.multimodal_config)}."
|
| 1215 |
+
)
|
| 1216 |
+
|
| 1217 |
+
text_config = config.text_config
|
| 1218 |
+
image_config = config.image_config
|
| 1219 |
+
multimodal_config = config.multimodal_config
|
| 1220 |
+
|
| 1221 |
+
self.projection_dim = config.projection_dim
|
| 1222 |
+
self.text_hidden_size = text_config.hidden_size
|
| 1223 |
+
self.image_hidden_size = image_config.hidden_size
|
| 1224 |
+
self.mm_hidden_size = multimodal_config.hidden_size
|
| 1225 |
+
|
| 1226 |
+
self.text_model = FlavaTextModel(text_config)
|
| 1227 |
+
self.image_model = FlavaImageModel(image_config)
|
| 1228 |
+
self.multimodal_model = FlavaMultimodalModel(multimodal_config)
|
| 1229 |
+
|
| 1230 |
+
self.image_projection = nn.Linear(self.image_hidden_size, self.projection_dim)
|
| 1231 |
+
self.text_projection = nn.Linear(self.text_hidden_size, self.projection_dim)
|
| 1232 |
+
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
|
| 1233 |
+
|
| 1234 |
+
self.image_to_mm_projection = nn.Linear(self.image_hidden_size, self.mm_hidden_size)
|
| 1235 |
+
self.text_to_mm_projection = nn.Linear(self.text_hidden_size, self.mm_hidden_size)
|
| 1236 |
+
# Initialize weights and apply final processing
|
| 1237 |
+
self.post_init()
|
| 1238 |
+
|
| 1239 |
+
@add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
|
| 1240 |
+
def get_text_features(
|
| 1241 |
+
self,
|
| 1242 |
+
input_ids: Optional[torch.Tensor] = None,
|
| 1243 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1244 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
| 1245 |
+
position_ids: Optional[torch.Tensor] = None,
|
| 1246 |
+
output_attentions: Optional[bool] = None,
|
| 1247 |
+
output_hidden_states: Optional[bool] = None,
|
| 1248 |
+
return_dict: Optional[bool] = None,
|
| 1249 |
+
) -> torch.FloatTensor:
|
| 1250 |
+
r"""
|
| 1251 |
+
Returns:
|
| 1252 |
+
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
|
| 1253 |
+
applying the projection layer to the pooled output of [`FlavaTextModel`].
|
| 1254 |
+
|
| 1255 |
+
Examples:
|
| 1256 |
+
|
| 1257 |
+
```python
|
| 1258 |
+
>>> from transformers import AutoProcessor, FlavaModel
|
| 1259 |
+
|
| 1260 |
+
>>> model = FlavaModel.from_pretrained("{0}")
|
| 1261 |
+
>>> processor = AutoProcessor.from_pretrained("{0}")
|
| 1262 |
+
|
| 1263 |
+
>>> inputs = processor(
|
| 1264 |
+
... text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt"
|
| 1265 |
+
... )
|
| 1266 |
+
>>> text_features = model.get_text_features(**inputs)
|
| 1267 |
+
```""".format(
|
| 1268 |
+
_CHECKPOINT_FOR_DOC
|
| 1269 |
+
)
|
| 1270 |
+
text_outputs = self.text_model(
|
| 1271 |
+
input_ids=input_ids,
|
| 1272 |
+
attention_mask=attention_mask,
|
| 1273 |
+
token_type_ids=token_type_ids,
|
| 1274 |
+
position_ids=position_ids,
|
| 1275 |
+
output_attentions=output_attentions,
|
| 1276 |
+
output_hidden_states=output_hidden_states,
|
| 1277 |
+
return_dict=return_dict,
|
| 1278 |
+
)
|
| 1279 |
+
|
| 1280 |
+
pooled_output = text_outputs[0] # last_hidden_state
|
| 1281 |
+
text_features = self.text_projection(pooled_output)
|
| 1282 |
+
|
| 1283 |
+
return text_features
|
| 1284 |
+
|
| 1285 |
+
@add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
|
| 1286 |
+
def get_image_features(
|
| 1287 |
+
self,
|
| 1288 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 1289 |
+
bool_masked_pos: Optional[torch.BoolTensor] = None,
|
| 1290 |
+
interpolate_pos_encoding: Optional[bool] = None,
|
| 1291 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1292 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 1293 |
+
output_attentions: Optional[bool] = None,
|
| 1294 |
+
output_hidden_states: Optional[bool] = None,
|
| 1295 |
+
return_dict: Optional[bool] = None,
|
| 1296 |
+
) -> torch.FloatTensor:
|
| 1297 |
+
r"""
|
| 1298 |
+
Returns:
|
| 1299 |
+
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
|
| 1300 |
+
applying the projection layer to the pooled output of [`FlavaImageModel`].
|
| 1301 |
+
|
| 1302 |
+
Examples:
|
| 1303 |
+
|
| 1304 |
+
```python
|
| 1305 |
+
>>> from PIL import Image
|
| 1306 |
+
>>> import requests
|
| 1307 |
+
>>> from transformers import AutoProcessor, FlavaModel
|
| 1308 |
+
|
| 1309 |
+
>>> model = FlavaModel.from_pretrained("{0}")
|
| 1310 |
+
>>> processor = AutoProcessor.from_pretrained("{0}")
|
| 1311 |
+
|
| 1312 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1313 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1314 |
+
|
| 1315 |
+
>>> inputs = processor(images=image, return_tensors="pt")
|
| 1316 |
+
|
| 1317 |
+
>>> image_features = model.get_image_features(**inputs)
|
| 1318 |
+
```""".format(
|
| 1319 |
+
_CHECKPOINT_FOR_DOC
|
| 1320 |
+
)
|
| 1321 |
+
image_outputs = self.image_model(
|
| 1322 |
+
pixel_values=pixel_values,
|
| 1323 |
+
bool_masked_pos=bool_masked_pos,
|
| 1324 |
+
attention_mask=attention_mask,
|
| 1325 |
+
head_mask=head_mask,
|
| 1326 |
+
output_attentions=output_attentions,
|
| 1327 |
+
output_hidden_states=output_hidden_states,
|
| 1328 |
+
interpolate_pos_encoding=interpolate_pos_encoding,
|
| 1329 |
+
return_dict=return_dict,
|
| 1330 |
+
)
|
| 1331 |
+
|
| 1332 |
+
pooled_output = image_outputs[0] # last_hidden_state
|
| 1333 |
+
image_features = self.image_projection(pooled_output)
|
| 1334 |
+
|
| 1335 |
+
return image_features
|
| 1336 |
+
|
| 1337 |
+
@add_start_docstrings_to_model_forward(
|
| 1338 |
+
FLAVA_MODEL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
|
| 1339 |
+
)
|
| 1340 |
+
@replace_return_docstrings(output_type=FlavaModelOutput, config_class=FlavaConfig)
|
| 1341 |
+
def forward(
|
| 1342 |
+
self,
|
| 1343 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1344 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
| 1345 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1346 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
| 1347 |
+
bool_masked_pos: Optional[torch.Tensor] = None,
|
| 1348 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1349 |
+
image_attention_mask: Optional[torch.Tensor] = None,
|
| 1350 |
+
skip_multimodal_encoder: Optional[bool] = None,
|
| 1351 |
+
output_attentions: Optional[bool] = None,
|
| 1352 |
+
output_hidden_states: bool = True,
|
| 1353 |
+
return_dict: Optional[bool] = None,
|
| 1354 |
+
) -> Union[Tuple, FlavaOutput]:
|
| 1355 |
+
r"""
|
| 1356 |
+
Returns:
|
| 1357 |
+
|
| 1358 |
+
Examples:
|
| 1359 |
+
|
| 1360 |
+
```python
|
| 1361 |
+
>>> from PIL import Image
|
| 1362 |
+
>>> import requests
|
| 1363 |
+
>>> from transformers import AutoProcessor, FlavaModel
|
| 1364 |
+
|
| 1365 |
+
>>> model = FlavaModel.from_pretrained("facebook/flava-full")
|
| 1366 |
+
>>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
|
| 1367 |
+
|
| 1368 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1369 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1370 |
+
|
| 1371 |
+
>>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
|
| 1372 |
+
|
| 1373 |
+
>>> outputs = model(**inputs)
|
| 1374 |
+
>>> logits_per_image = outputs.contrastive_logits_per_image # this is the image-text similarity score
|
| 1375 |
+
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
|
| 1376 |
+
```
|
| 1377 |
+
"""
|
| 1378 |
+
|
| 1379 |
+
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
| 1380 |
+
if not output_hidden_states:
|
| 1381 |
+
raise ValueError("FLAVA model requires hidden states to work. Please set `output_hidden_states=True`")
|
| 1382 |
+
image_embeddings = None
|
| 1383 |
+
image_states = None
|
| 1384 |
+
image_mm_projection = None
|
| 1385 |
+
image_output = None
|
| 1386 |
+
if pixel_values is not None:
|
| 1387 |
+
image_output = self.image_model(
|
| 1388 |
+
pixel_values=pixel_values,
|
| 1389 |
+
bool_masked_pos=bool_masked_pos,
|
| 1390 |
+
attention_mask=image_attention_mask,
|
| 1391 |
+
output_attentions=output_attentions,
|
| 1392 |
+
output_hidden_states=output_hidden_states,
|
| 1393 |
+
return_dict=return_dict,
|
| 1394 |
+
)
|
| 1395 |
+
image_embeddings, image_states = image_output[0], image_output[2]
|
| 1396 |
+
# Note that these states don't use final layernorm in the transformer model
|
| 1397 |
+
image_mm_projection = self.image_to_mm_projection(image_states[-1])
|
| 1398 |
+
|
| 1399 |
+
text_embeddings = None
|
| 1400 |
+
text_states = None
|
| 1401 |
+
text_mm_projection = None
|
| 1402 |
+
text_output = None
|
| 1403 |
+
if input_ids is not None:
|
| 1404 |
+
text_output = self.text_model(
|
| 1405 |
+
input_ids=input_ids,
|
| 1406 |
+
attention_mask=attention_mask,
|
| 1407 |
+
position_ids=position_ids,
|
| 1408 |
+
token_type_ids=token_type_ids,
|
| 1409 |
+
output_attentions=output_attentions,
|
| 1410 |
+
output_hidden_states=output_hidden_states,
|
| 1411 |
+
return_dict=return_dict,
|
| 1412 |
+
)
|
| 1413 |
+
|
| 1414 |
+
text_embeddings, text_states = text_output[0], text_output[2]
|
| 1415 |
+
# Note that these states don't use final layernorm in the transformer model
|
| 1416 |
+
text_mm_projection = self.text_to_mm_projection(text_states[-1])
|
| 1417 |
+
|
| 1418 |
+
multimodal_embeddings = None
|
| 1419 |
+
multimodal_output = None
|
| 1420 |
+
if image_mm_projection is not None and text_mm_projection is not None and not skip_multimodal_encoder:
|
| 1421 |
+
multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1)
|
| 1422 |
+
multimodal_output = self.multimodal_model(multimodal_input, return_dict=return_dict)
|
| 1423 |
+
multimodal_embeddings = multimodal_output[0]
|
| 1424 |
+
|
| 1425 |
+
if not return_dict:
|
| 1426 |
+
return (
|
| 1427 |
+
image_embeddings,
|
| 1428 |
+
image_output,
|
| 1429 |
+
text_embeddings,
|
| 1430 |
+
text_output,
|
| 1431 |
+
multimodal_embeddings,
|
| 1432 |
+
multimodal_output,
|
| 1433 |
+
)
|
| 1434 |
+
|
| 1435 |
+
return FlavaModelOutput(
|
| 1436 |
+
image_embeddings=image_embeddings,
|
| 1437 |
+
image_output=image_output,
|
| 1438 |
+
text_embeddings=text_embeddings,
|
| 1439 |
+
text_output=text_output,
|
| 1440 |
+
multimodal_embeddings=multimodal_embeddings,
|
| 1441 |
+
multimodal_output=multimodal_output,
|
| 1442 |
+
)
|
| 1443 |
+
|
| 1444 |
+
|
| 1445 |
+
class FlavaImageCodebookResPath(nn.Module):
|
| 1446 |
+
def __init__(self, in_size: int, out_size: int, **kwargs):
|
| 1447 |
+
super().__init__()
|
| 1448 |
+
hid_size = out_size // 4
|
| 1449 |
+
|
| 1450 |
+
path = OrderedDict()
|
| 1451 |
+
path["relu_1"] = nn.ReLU()
|
| 1452 |
+
path["conv_1"] = nn.Conv2d(in_size, hid_size, kernel_size=3, padding=1)
|
| 1453 |
+
path["relu_2"] = nn.ReLU()
|
| 1454 |
+
path["conv_2"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
|
| 1455 |
+
path["relu_3"] = nn.ReLU()
|
| 1456 |
+
path["conv_3"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
|
| 1457 |
+
path["relu_4"] = nn.ReLU()
|
| 1458 |
+
path["conv_4"] = nn.Conv2d(hid_size, out_size, kernel_size=1, padding=0)
|
| 1459 |
+
|
| 1460 |
+
self.path = nn.Sequential(path)
|
| 1461 |
+
|
| 1462 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 1463 |
+
return self.path(x)
|
| 1464 |
+
|
| 1465 |
+
|
| 1466 |
+
class FlavaImageCodebookBlock(nn.Module):
|
| 1467 |
+
def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs):
|
| 1468 |
+
super().__init__()
|
| 1469 |
+
|
| 1470 |
+
self.post_gain = 1 / (num_layers**2)
|
| 1471 |
+
|
| 1472 |
+
if in_size != out_size:
|
| 1473 |
+
self.id_path = nn.Conv2d(in_size, out_size, kernel_size=1, padding=0)
|
| 1474 |
+
else:
|
| 1475 |
+
self.id_path = nn.Identity()
|
| 1476 |
+
|
| 1477 |
+
self.res_path = FlavaImageCodebookResPath(in_size, out_size)
|
| 1478 |
+
|
| 1479 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 1480 |
+
return self.id_path(x) + self.post_gain * self.res_path(x)
|
| 1481 |
+
|
| 1482 |
+
|
| 1483 |
+
class FlavaImageCodebookLayerGroup(nn.Module):
|
| 1484 |
+
def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool = True):
|
| 1485 |
+
super().__init__()
|
| 1486 |
+
blocks = OrderedDict()
|
| 1487 |
+
for i in range(num_blocks):
|
| 1488 |
+
if i == 0:
|
| 1489 |
+
blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(in_size, out_size, num_layers)
|
| 1490 |
+
else:
|
| 1491 |
+
blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(out_size, out_size, num_layers)
|
| 1492 |
+
|
| 1493 |
+
if use_pool:
|
| 1494 |
+
blocks["pool"] = nn.MaxPool2d(kernel_size=2)
|
| 1495 |
+
|
| 1496 |
+
self.group = nn.Sequential(blocks)
|
| 1497 |
+
|
| 1498 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 1499 |
+
return self.group(x)
|
| 1500 |
+
|
| 1501 |
+
|
| 1502 |
+
# Inspired by DALLE Encoder in https://github.com/openai/DALL-E/blob/5be4b236bc3ade6943662354117a0e83752cc322/dall_e/encoder.py#L42
|
| 1503 |
+
@add_start_docstrings(
|
| 1504 |
+
"""
|
| 1505 |
+
FLAVA's image codebook model, inspired by DALL-E's original encoder. It outputs raw hidden states and can be used
|
| 1506 |
+
to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use
|
| 1507 |
+
`get_codebook_indices` to get image tokens for an image.
|
| 1508 |
+
""",
|
| 1509 |
+
FLAVA_START_DOCSTRING.format(config="FlavaImageCodebookConfig"),
|
| 1510 |
+
)
|
| 1511 |
+
class FlavaImageCodebook(FlavaPreTrainedModel):
|
| 1512 |
+
base_model_prefix = ""
|
| 1513 |
+
config_class = FlavaImageCodebookConfig
|
| 1514 |
+
main_input_name = "pixel_values"
|
| 1515 |
+
supports_gradient_checkpointing = False
|
| 1516 |
+
|
| 1517 |
+
def __init__(
|
| 1518 |
+
self,
|
| 1519 |
+
config: FlavaImageCodebookConfig,
|
| 1520 |
+
**kwargs: Any,
|
| 1521 |
+
):
|
| 1522 |
+
super().__init__(config)
|
| 1523 |
+
|
| 1524 |
+
self.config = config
|
| 1525 |
+
self.num_groups = config.num_groups
|
| 1526 |
+
self.input_channels = config.input_channels
|
| 1527 |
+
self.num_blocks_per_group = config.num_blocks_per_group
|
| 1528 |
+
self.hidden_size = config.hidden_size
|
| 1529 |
+
self.vocab_size = config.vocab_size
|
| 1530 |
+
|
| 1531 |
+
num_layers = self.num_groups * self.num_blocks_per_group
|
| 1532 |
+
|
| 1533 |
+
output_blocks = OrderedDict()
|
| 1534 |
+
output_blocks["relu"] = nn.ReLU()
|
| 1535 |
+
output_blocks["conv"] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0)
|
| 1536 |
+
|
| 1537 |
+
blocks = OrderedDict()
|
| 1538 |
+
blocks["input"] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3)
|
| 1539 |
+
blocks["group_1"] = FlavaImageCodebookLayerGroup(
|
| 1540 |
+
self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size
|
| 1541 |
+
)
|
| 1542 |
+
blocks["group_2"] = FlavaImageCodebookLayerGroup(
|
| 1543 |
+
self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size
|
| 1544 |
+
)
|
| 1545 |
+
blocks["group_3"] = FlavaImageCodebookLayerGroup(
|
| 1546 |
+
self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size
|
| 1547 |
+
)
|
| 1548 |
+
blocks["group_4"] = FlavaImageCodebookLayerGroup(
|
| 1549 |
+
self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False
|
| 1550 |
+
)
|
| 1551 |
+
blocks["output"] = nn.Sequential(output_blocks)
|
| 1552 |
+
|
| 1553 |
+
self.blocks = nn.Sequential(blocks)
|
| 1554 |
+
|
| 1555 |
+
self.post_init()
|
| 1556 |
+
|
| 1557 |
+
if self.config.freeze:
|
| 1558 |
+
for param in self.parameters():
|
| 1559 |
+
param.requires_grad = False
|
| 1560 |
+
|
| 1561 |
+
def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor:
|
| 1562 |
+
"""
|
| 1563 |
+
Args:
|
| 1564 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 1565 |
+
Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
|
| 1566 |
+
`return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.
|
| 1567 |
+
|
| 1568 |
+
Examples:
|
| 1569 |
+
```python
|
| 1570 |
+
>>> from PIL import Image
|
| 1571 |
+
>>> import requests
|
| 1572 |
+
>>> from transformers import AutoImageProcessor, FlavaImageCodebook
|
| 1573 |
+
|
| 1574 |
+
>>> model = FlavaImageCodebook.from_pretrained("{0}")
|
| 1575 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("{0}")
|
| 1576 |
+
|
| 1577 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1578 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1579 |
+
|
| 1580 |
+
>>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
|
| 1581 |
+
>>> inputs = dict(pixel_values=inputs.codebook_pixel_values)
|
| 1582 |
+
|
| 1583 |
+
>>> outputs = model.get_codebook_indices(**inputs)
|
| 1584 |
+
```
|
| 1585 |
+
""".format(
|
| 1586 |
+
_CHECKPOINT_FOR_CODEBOOK_DOC
|
| 1587 |
+
)
|
| 1588 |
+
z_logits = self.blocks(pixel_values)
|
| 1589 |
+
return torch.argmax(z_logits, axis=1)
|
| 1590 |
+
|
| 1591 |
+
def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor:
|
| 1592 |
+
z_logits = self.blocks(pixel_values)
|
| 1593 |
+
return nn.Softmax(dim=1)(z_logits)
|
| 1594 |
+
|
| 1595 |
+
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
|
| 1596 |
+
"""
|
| 1597 |
+
Args:
|
| 1598 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 1599 |
+
Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
|
| 1600 |
+
`return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.
|
| 1601 |
+
|
| 1602 |
+
Examples:
|
| 1603 |
+
|
| 1604 |
+
```python
|
| 1605 |
+
>>> from PIL import Image
|
| 1606 |
+
>>> import requests
|
| 1607 |
+
>>> from transformers import AutoImageProcessor, FlavaImageCodebook
|
| 1608 |
+
|
| 1609 |
+
>>> model = FlavaImageCodebook.from_pretrained("{0}")
|
| 1610 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("{0}")
|
| 1611 |
+
|
| 1612 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1613 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1614 |
+
|
| 1615 |
+
>>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
|
| 1616 |
+
>>> inputs = dict(pixel_values=inputs.codebook_pixel_values)
|
| 1617 |
+
|
| 1618 |
+
>>> outputs = model(**inputs)
|
| 1619 |
+
>>> print(outputs.shape)
|
| 1620 |
+
(1, 196)
|
| 1621 |
+
```
|
| 1622 |
+
""".format(
|
| 1623 |
+
_CHECKPOINT_FOR_CODEBOOK_DOC
|
| 1624 |
+
)
|
| 1625 |
+
if len(pixel_values.shape) != 4:
|
| 1626 |
+
raise ValueError(f"input shape {pixel_values.shape} is not 4d")
|
| 1627 |
+
if pixel_values.shape[1] != self.input_channels:
|
| 1628 |
+
raise ValueError(f"input has {pixel_values.shape[1]} channels but model built for {self.input_channels}")
|
| 1629 |
+
return self.blocks(pixel_values)
|
| 1630 |
+
|
| 1631 |
+
|
| 1632 |
+
class FlavaPredictionHeadTransform(nn.Module):
|
| 1633 |
+
def __init__(self, config):
|
| 1634 |
+
super().__init__()
|
| 1635 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 1636 |
+
if isinstance(config.hidden_act, str):
|
| 1637 |
+
self.transform_act_fn = ACT2FN[config.hidden_act]
|
| 1638 |
+
else:
|
| 1639 |
+
self.transform_act_fn = config.hidden_act
|
| 1640 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 1641 |
+
|
| 1642 |
+
def forward(self, hidden_states):
|
| 1643 |
+
hidden_states = self.dense(hidden_states)
|
| 1644 |
+
hidden_states = self.transform_act_fn(hidden_states)
|
| 1645 |
+
hidden_states = self.LayerNorm(hidden_states)
|
| 1646 |
+
return hidden_states
|
| 1647 |
+
|
| 1648 |
+
|
| 1649 |
+
class FlavaMaskedPredictionHead(nn.Module):
|
| 1650 |
+
def __init__(self, config, weight=None):
|
| 1651 |
+
super().__init__()
|
| 1652 |
+
self.config = config
|
| 1653 |
+
self.transform = FlavaPredictionHeadTransform(config)
|
| 1654 |
+
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 1655 |
+
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
|
| 1656 |
+
if weight is not None:
|
| 1657 |
+
self.decoder.weight = weight
|
| 1658 |
+
|
| 1659 |
+
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
|
| 1660 |
+
self.decoder.bias = self.bias
|
| 1661 |
+
|
| 1662 |
+
def forward(self, x):
|
| 1663 |
+
x = self.transform(x)
|
| 1664 |
+
x = self.decoder(x)
|
| 1665 |
+
return x
|
| 1666 |
+
|
| 1667 |
+
|
| 1668 |
+
class FlavaITMHead(nn.Module):
|
| 1669 |
+
def __init__(self, config):
|
| 1670 |
+
super().__init__()
|
| 1671 |
+
self.config = config
|
| 1672 |
+
self.pooler = FlavaPooler(config)
|
| 1673 |
+
self.seq_relationship = nn.Linear(config.hidden_size, 2)
|
| 1674 |
+
|
| 1675 |
+
def forward(self, x):
|
| 1676 |
+
x = self.pooler(x)
|
| 1677 |
+
x = self.seq_relationship(x)
|
| 1678 |
+
return x
|
| 1679 |
+
|
| 1680 |
+
|
| 1681 |
+
class FlavaGlobalContrastiveHead(nn.Module):
|
| 1682 |
+
def __init__(self, config):
|
| 1683 |
+
super().__init__()
|
| 1684 |
+
self.config = config
|
| 1685 |
+
self.global_backprop_contrastive = config.global_backprop_contrastive
|
| 1686 |
+
|
| 1687 |
+
def forward(self, image_embeddings, text_embeddings, logit_scale):
|
| 1688 |
+
temperature = torch.exp(logit_scale)
|
| 1689 |
+
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
|
| 1690 |
+
labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device)
|
| 1691 |
+
image_embeddings_all = [image_embeddings]
|
| 1692 |
+
text_embeddings_all = [text_embeddings]
|
| 1693 |
+
else:
|
| 1694 |
+
local_batch_size = image_embeddings.size(0)
|
| 1695 |
+
world_size = torch.distributed.get_world_size()
|
| 1696 |
+
|
| 1697 |
+
if self.global_backprop_contrastive:
|
| 1698 |
+
# `torch.distributed.nn.functional.all_gather` does backprop on all active workers
|
| 1699 |
+
# whereas `torch.distributed.all_gather` only backpropagates on the current worker.
|
| 1700 |
+
image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings)
|
| 1701 |
+
text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings)
|
| 1702 |
+
else:
|
| 1703 |
+
image_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)]
|
| 1704 |
+
text_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)]
|
| 1705 |
+
torch.distributed.all_gather(image_embeddings_all, image_embeddings)
|
| 1706 |
+
torch.distributed.all_gather(text_embeddings_all, text_embeddings)
|
| 1707 |
+
|
| 1708 |
+
labels = local_batch_size * torch.distributed.get_rank() + torch.arange(
|
| 1709 |
+
local_batch_size, device=image_embeddings.device
|
| 1710 |
+
)
|
| 1711 |
+
|
| 1712 |
+
image_embeddings_all = torch.cat(image_embeddings_all)
|
| 1713 |
+
text_embeddings_all = torch.cat(text_embeddings_all)
|
| 1714 |
+
|
| 1715 |
+
logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature
|
| 1716 |
+
logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature
|
| 1717 |
+
|
| 1718 |
+
return logits_per_image, logits_per_text, labels
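# The pretraining model below turns these logits into a symmetric cross-entropy loss. A minimal sketch that
# mirrors the global-contrastive branch of `FlavaForPreTraining.forward` (names are illustrative):
#
#     logits_per_image, logits_per_text, labels = global_contrastive_head(image_emb, text_emb, logit_scale)
#     gc_loss = (
#         nn.functional.cross_entropy(logits_per_image, labels)
#         + nn.functional.cross_entropy(logits_per_text, labels)
#     ) / 2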
|
| 1719 |
+
|
| 1720 |
+
|
| 1721 |
+
@add_start_docstrings(
|
| 1722 |
+
"""
|
| 1723 |
+
The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.
|
| 1724 |
+
""",
|
| 1725 |
+
FLAVA_START_DOCSTRING.format(config="FlavaConfig") + FLAVA_PRETRAINING_START_DOCSTRING_EXTRA,
|
| 1726 |
+
)
|
| 1727 |
+
class FlavaForPreTraining(FlavaPreTrainedModel):
|
| 1728 |
+
# Those are linked to xxx.bias
|
| 1729 |
+
_tied_weights_keys = [
|
| 1730 |
+
"mmm_text_head.decoder.bias",
|
| 1731 |
+
"mmm_image_head.decoder.bias",
|
| 1732 |
+
"mlm_head.decoder.bias",
|
| 1733 |
+
"mim_head.decoder.bias",
|
| 1734 |
+
]
|
| 1735 |
+
|
| 1736 |
+
def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module] = None):
|
| 1737 |
+
super().__init__(config)
|
| 1738 |
+
self.flava = FlavaModel(config)
|
| 1739 |
+
|
| 1740 |
+
self.image_codebook = image_codebook
|
| 1741 |
+
if self.image_codebook is None and config.init_codebook:
|
| 1742 |
+
self.image_codebook = FlavaImageCodebook(config.image_codebook_config)
|
| 1743 |
+
|
| 1744 |
+
# Leverage the text and image encoder configs to create the masked
|
| 1745 |
+
# head since it has the right vocab
|
| 1746 |
+
self.mim_head = FlavaMaskedPredictionHead(config.image_config)
|
| 1747 |
+
self.mlm_head = FlavaMaskedPredictionHead(config.text_config)
|
| 1748 |
+
self.itm_head = FlavaITMHead(config)
|
| 1749 |
+
self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config)
|
| 1750 |
+
self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config)
|
| 1751 |
+
self.global_contrastive_head = FlavaGlobalContrastiveHead(config)
|
| 1752 |
+
|
| 1753 |
+
self.image_vocab_size = config.image_config.vocab_size
|
| 1754 |
+
self.text_vocab_size = config.text_config.vocab_size
|
| 1755 |
+
self.mlm_weight = config.mlm_weight
|
| 1756 |
+
self.mim_weight = config.mim_weight
|
| 1757 |
+
self.global_contrastive_weight = config.global_contrastive_weight
|
| 1758 |
+
self.ce_ignore_index = config.ce_ignore_index
|
| 1759 |
+
self.itm_weight = config.itm_weight
|
| 1760 |
+
self.mmm_image_weight = config.mmm_image_weight
|
| 1761 |
+
self.mmm_text_weight = config.mmm_text_weight
|
| 1762 |
+
self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder
|
| 1763 |
+
|
| 1764 |
+
self.post_init()
|
| 1765 |
+
|
| 1766 |
+
def _resize_to_2d(self, x: torch.Tensor):
|
| 1767 |
+
if x.dim() > 2:
|
| 1768 |
+
x = x.view(x.size(0), -1)
|
| 1769 |
+
return x
|
| 1770 |
+
|
| 1771 |
+
@add_start_docstrings_to_model_forward(
|
| 1772 |
+
FLAVA_PRETRAINING_INPUTS_DOCSTRING.format("batch_size, text_seq_len", "batch_size, image_num_patches")
|
| 1773 |
+
)
|
| 1774 |
+
@replace_return_docstrings(output_type=FlavaForPreTrainingOutput, config_class=FlavaConfig)
|
| 1775 |
+
def forward(
|
| 1776 |
+
self,
|
| 1777 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1778 |
+
input_ids_masked: Optional[torch.LongTensor] = None,
|
| 1779 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
| 1780 |
+
codebook_pixel_values: Optional[torch.FloatTensor] = None,
|
| 1781 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1782 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
| 1783 |
+
bool_masked_pos: Optional[torch.Tensor] = None,
|
| 1784 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1785 |
+
image_attention_mask: Optional[torch.Tensor] = None,
|
| 1786 |
+
skip_unmasked_multimodal_encoder: bool = None,
|
| 1787 |
+
mlm_labels: Optional[torch.Tensor] = None,
|
| 1788 |
+
mim_labels: Optional[torch.Tensor] = None,
|
| 1789 |
+
itm_labels: Optional[torch.Tensor] = None,
|
| 1790 |
+
output_attentions: Optional[bool] = None,
|
| 1791 |
+
output_hidden_states: bool = True,
|
| 1792 |
+
return_dict: Optional[bool] = None,
|
| 1793 |
+
return_loss: Optional[bool] = None,
|
| 1794 |
+
) -> Union[Tuple[torch.Tensor], FlavaForPreTrainingOutput]:
|
| 1795 |
+
"""
|
| 1796 |
+
Examples:
|
| 1797 |
+
```python
|
| 1798 |
+
>>> from PIL import Image
|
| 1799 |
+
>>> import requests
|
| 1800 |
+
>>> from transformers import FlavaForPreTraining, AutoProcessor
|
| 1801 |
+
|
| 1802 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1803 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1804 |
+
|
| 1805 |
+
>>> model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
|
| 1806 |
+
>>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
|
| 1807 |
+
|
| 1808 |
+
>>> text = ["a photo of a cat"]
|
| 1809 |
+
|
| 1810 |
+
>>> inputs = processor(
|
| 1811 |
+
... images=[image],
|
| 1812 |
+
... text=text,
|
| 1813 |
+
... return_masks=True,
|
| 1814 |
+
... return_codebook_pixels=True,
|
| 1815 |
+
... padding=True,
|
| 1816 |
+
... max_length=77,
|
| 1817 |
+
... return_tensors="pt",
|
| 1818 |
+
... )
|
| 1819 |
+
|
| 1820 |
+
|
| 1821 |
+
>>> output = model(**inputs)
|
| 1822 |
+
```
|
| 1823 |
+
|
| 1824 |
+
Return:
|
| 1825 |
+
|
| 1826 |
+
"""
|
| 1827 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1828 |
+
return_loss = return_loss if return_loss is not None else self.config.return_loss
|
| 1829 |
+
|
| 1830 |
+
skip_unmasked_multimodal_encoder = (
|
| 1831 |
+
skip_unmasked_multimodal_encoder
|
| 1832 |
+
if skip_unmasked_multimodal_encoder is not None
|
| 1833 |
+
else self.skip_unmasked_multimodal_encoder
|
| 1834 |
+
)
|
| 1835 |
+
|
| 1836 |
+
if input_ids_masked is None and input_ids is not None:
|
| 1837 |
+
logger.warning(
"`input_ids_masked` isn't passed which means MLM loss won't be calculated correctly. Setting it to"
" `input_ids` so that the model can work. Please pass it if this is unintentional. This is usually OKAY if"
|
| 1840 |
+
" you are doing inference on unmasked text..."
|
| 1841 |
+
)
|
| 1842 |
+
input_ids_masked = input_ids
|
| 1843 |
+
|
| 1844 |
+
flava_output = self.flava(
|
| 1845 |
+
input_ids=input_ids,
|
| 1846 |
+
pixel_values=pixel_values,
|
| 1847 |
+
attention_mask=attention_mask,
|
| 1848 |
+
token_type_ids=token_type_ids,
|
| 1849 |
+
position_ids=position_ids,
|
| 1850 |
+
image_attention_mask=image_attention_mask,
|
| 1851 |
+
# Don't need unmasked multimodal embedding for anything so skip it
|
| 1852 |
+
# NOTE: ITM uses masked version
|
| 1853 |
+
skip_multimodal_encoder=skip_unmasked_multimodal_encoder,
|
| 1854 |
+
output_attentions=output_attentions,
|
| 1855 |
+
output_hidden_states=output_hidden_states,
|
| 1856 |
+
# Pass true to have deterministic outputs
|
| 1857 |
+
return_dict=True,
|
| 1858 |
+
)
|
| 1859 |
+
|
| 1860 |
+
flava_masked_output = self.flava(
|
| 1861 |
+
input_ids=input_ids_masked,
|
| 1862 |
+
pixel_values=pixel_values,
|
| 1863 |
+
attention_mask=attention_mask,
|
| 1864 |
+
token_type_ids=token_type_ids,
|
| 1865 |
+
image_attention_mask=image_attention_mask,
|
| 1866 |
+
bool_masked_pos=bool_masked_pos,
|
| 1867 |
+
output_attentions=output_attentions,
|
| 1868 |
+
output_hidden_states=output_hidden_states,
|
| 1869 |
+
return_dict=True,
|
| 1870 |
+
)
|
| 1871 |
+
|
| 1872 |
+
pos_mask = None
|
| 1873 |
+
|
| 1874 |
+
image_embeddings = flava_output.image_embeddings
|
| 1875 |
+
text_embeddings = flava_output.text_embeddings
|
| 1876 |
+
image_masked_embeddings = flava_masked_output.image_embeddings
|
| 1877 |
+
text_masked_embeddings = flava_masked_output.text_embeddings
|
| 1878 |
+
multimodal_masked_embeddings = flava_masked_output.multimodal_embeddings
|
| 1879 |
+
|
| 1880 |
+
total_loss = mim_loss = mlm_loss = mmm_text_loss = mmm_image_loss = gc_loss = itm_loss = None
|
| 1881 |
+
mim_logits = mlm_logits = mmm_text_logits = mmm_image_logits = None
|
| 1882 |
+
itm_logits = logits_per_image = logits_per_text = None
|
| 1883 |
+
|
| 1884 |
+
# Calculate mim_labels if necessary from the image_codebook
|
| 1885 |
+
if image_masked_embeddings is not None or multimodal_masked_embeddings is not None:
|
| 1886 |
+
if mim_labels is None and return_loss:
|
| 1887 |
+
if self.image_codebook is None:
|
| 1888 |
+
raise RuntimeError(
"`return_loss` is set to True but the image codebook is not initialized and no `mim_labels`"
" have been passed. Reinstantiate the model with `init_codebook` set to True or "
|
| 1891 |
+
"pass in your custom `mim_labels`"
|
| 1892 |
+
)
|
| 1893 |
+
if codebook_pixel_values is None:
|
| 1894 |
+
raise ValueError(
"`codebook_pixel_values` are required to generate `mim_labels` if loss is expected. "
|
| 1896 |
+
"Call `AutoProcessor` with `return_codebook_pixels` set to True"
|
| 1897 |
+
)
|
| 1898 |
+
mim_labels = self.image_codebook.get_codebook_indices(codebook_pixel_values)
|
| 1899 |
+
# Unimodal MIM Loss
|
| 1900 |
+
# If multimodal embeddings are present, we will calculate MMM loss
|
| 1901 |
+
if self.mim_weight > 0 and image_masked_embeddings is not None and multimodal_masked_embeddings is None:
|
| 1902 |
+
sequence_for_image = image_masked_embeddings
|
| 1903 |
+
|
| 1904 |
+
if mim_labels is not None:
|
| 1905 |
+
mim_labels = self._resize_to_2d(mim_labels)
|
| 1906 |
+
bool_masked_pos = self._resize_to_2d(bool_masked_pos)
|
| 1907 |
+
mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
|
| 1908 |
+
|
| 1909 |
+
sequence_for_image = sequence_for_image[:, -mim_labels.size(1) :, :]
|
| 1910 |
+
masked_tokens = mim_labels.ne(self.ce_ignore_index)
|
| 1911 |
+
mim_labels_filtered = mim_labels[masked_tokens]
|
| 1912 |
+
sequence_for_image = sequence_for_image[masked_tokens, :]
|
| 1913 |
+
mim_logits = self.mim_head(sequence_for_image)
|
| 1914 |
+
if return_loss:
|
| 1915 |
+
mim_loss = nn.functional.cross_entropy(
|
| 1916 |
+
mim_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
|
| 1917 |
+
)
|
| 1918 |
+
mim_loss *= self.mim_weight
|
| 1919 |
+
else:
|
| 1920 |
+
mim_logits = self.mim_head(sequence_for_image)
|
| 1921 |
+
|
| 1922 |
+
# Unimodal MLM Loss
|
| 1923 |
+
if self.mlm_weight > 0 and text_masked_embeddings is not None and multimodal_masked_embeddings is None:
|
| 1924 |
+
sequence_for_text = text_masked_embeddings
|
| 1925 |
+
if mlm_labels is not None:
|
| 1926 |
+
mlm_labels = self._resize_to_2d(mlm_labels)
|
| 1927 |
+
sequence_for_text = sequence_for_text[:, -mlm_labels.size(1) :, :]
|
| 1928 |
+
masked_tokens = mlm_labels.ne(self.ce_ignore_index)
|
| 1929 |
+
mlm_labels_filtered = mlm_labels[masked_tokens]
|
| 1930 |
+
sequence_for_text = sequence_for_text[masked_tokens, :]
|
| 1931 |
+
mlm_logits = self.mlm_head(sequence_for_text)
|
| 1932 |
+
if return_loss:
|
| 1933 |
+
mlm_loss = nn.functional.cross_entropy(
|
| 1934 |
+
mlm_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
|
| 1935 |
+
)
|
| 1936 |
+
mlm_loss *= self.mlm_weight
|
| 1937 |
+
else:
|
| 1938 |
+
mlm_logits = self.mlm_head(sequence_for_text)
|
| 1939 |
+
|
| 1940 |
+
# ITM Loss
|
| 1941 |
+
if self.itm_weight > 0 and multimodal_masked_embeddings is not None:
|
| 1942 |
+
itm_logits = self.itm_head(multimodal_masked_embeddings)
|
| 1943 |
+
|
| 1944 |
+
if itm_labels is not None:
|
| 1945 |
+
pos_pairs = itm_labels.ne(0)
|
| 1946 |
+
pos_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
|
| 1947 |
+
if return_loss:
|
| 1948 |
+
itm_loss = nn.functional.cross_entropy(itm_logits, itm_labels)
|
| 1949 |
+
itm_loss *= self.itm_weight
|
| 1950 |
+
|
| 1951 |
+
if multimodal_masked_embeddings is not None:
|
| 1952 |
+
multimodal_masked_embeddings = multimodal_masked_embeddings[pos_mask]
|
| 1953 |
+
|
| 1954 |
+
if mlm_labels is not None:
|
| 1955 |
+
mlm_labels = mlm_labels[pos_mask]
|
| 1956 |
+
|
| 1957 |
+
if mim_labels is not None:
|
| 1958 |
+
mim_labels = mim_labels[pos_mask]
|
| 1959 |
+
|
| 1960 |
+
# MMM Image Loss
|
| 1961 |
+
if multimodal_masked_embeddings is not None and self.mmm_image_weight > 0:
|
| 1962 |
+
sequence_for_image = multimodal_masked_embeddings
|
| 1963 |
+
end_index = image_masked_embeddings.size(1) - 1
|
| 1964 |
+
sequence_for_image = sequence_for_image[:, 2 : 2 + end_index, :]
|
| 1965 |
+
|
| 1966 |
+
if pos_mask is not None:
|
| 1967 |
+
sequence_for_image = sequence_for_image[pos_mask]
|
| 1968 |
+
if mim_labels is not None:
|
| 1969 |
+
mim_labels = self._resize_to_2d(mim_labels)
|
| 1970 |
+
bool_masked_pos = self._resize_to_2d(bool_masked_pos)
|
| 1971 |
+
mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
|
| 1972 |
+
|
| 1973 |
+
masked_tokens = mim_labels.ne(self.ce_ignore_index)
|
| 1974 |
+
mim_labels_filtered = mim_labels[masked_tokens]
|
| 1975 |
+
sequence_for_image = sequence_for_image[masked_tokens, :]
|
| 1976 |
+
mmm_image_logits = self.mmm_image_head(sequence_for_image)
|
| 1977 |
+
if return_loss:
|
| 1978 |
+
mmm_image_loss = nn.functional.cross_entropy(
|
| 1979 |
+
mmm_image_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
|
| 1980 |
+
)
|
| 1981 |
+
mmm_image_loss *= self.mmm_image_weight
|
| 1982 |
+
else:
|
| 1983 |
+
mmm_image_logits = self.mmm_image_head(sequence_for_image)
|
| 1984 |
+
|
| 1985 |
+
# MMM Text Loss
|
| 1986 |
+
if multimodal_masked_embeddings is not None and self.mmm_text_weight > 0:
|
| 1987 |
+
sequence_for_text = multimodal_masked_embeddings
|
| 1988 |
+
sequence_for_text = sequence_for_text[:, -text_masked_embeddings.size(1) :, :]
|
| 1989 |
+
if pos_mask is not None:
|
+                sequence_for_text = sequence_for_text[pos_mask]
+
+            if mlm_labels is not None:
+                mlm_labels = self._resize_to_2d(mlm_labels)
+                masked_tokens = mlm_labels.ne(self.ce_ignore_index)
+                mlm_labels_filtered = mlm_labels[masked_tokens]
+                sequence_for_text = sequence_for_text[masked_tokens, :]
+                mmm_text_logits = self.mmm_text_head(sequence_for_text)
+                if return_loss:
+                    mmm_text_loss = nn.functional.cross_entropy(
+                        mmm_text_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
+                    )
+                    mmm_text_loss *= self.mmm_text_weight
+            else:
+                mmm_text_logits = self.mmm_text_head(sequence_for_text)
+
+        # Global Contrastive Loss
+        if image_embeddings is not None and text_embeddings is not None and self.global_contrastive_weight > 0:
+            text_embedding = self.flava.text_projection(text_embeddings[:, 0, :])
+            text_embedding = nn.functional.normalize(text_embedding, dim=-1)
+
+            image_embedding = self.flava.image_projection(image_embeddings[:, 0, :])
+            image_embedding = nn.functional.normalize(image_embedding, dim=-1)
+
+            self.flava.logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX)
+
+            logits_per_image, logits_per_text, gc_labels = self.global_contrastive_head(
+                image_embedding, text_embedding, self.flava.logit_scale
+            )
+
+            # Apply ITM negative mask if any
+            if pos_mask is not None:
+                logits_per_image = logits_per_image[pos_mask]
+                logits_per_text = logits_per_text[pos_mask]
+                gc_labels = gc_labels[pos_mask]
+
+            if return_loss:
+                gc_loss_image = nn.functional.cross_entropy(logits_per_image, gc_labels)
+                gc_loss_text = nn.functional.cross_entropy(logits_per_text, gc_labels)
+                gc_loss = (gc_loss_image + gc_loss_text) / 2
+                gc_loss *= self.global_contrastive_weight
+
+        flava_losses = FlavaLosses(
+            mim=mim_loss,
+            mlm=mlm_loss,
+            itm=itm_loss,
+            global_contrastive=gc_loss,
+            mmm_image=mmm_image_loss,
+            mmm_text=mmm_text_loss,
+        )
+
+        if return_loss and not flava_losses.all_none():
+            total_loss = sum(loss if loss is not None else 0 for loss in flava_losses.values())
+
+        if not return_dict:
+            output = (
+                image_embeddings,
+                flava_output.image_output.to_tuple() if flava_output.image_output is not None else None,
+                text_embeddings,
+                flava_output.text_output.to_tuple() if flava_output.text_output is not None else None,
+                flava_output.multimodal_embeddings,
+                flava_output.multimodal_output.to_tuple() if flava_output.multimodal_output is not None else None,
+                image_masked_embeddings,
+                flava_masked_output.image_output.to_tuple() if flava_masked_output.image_output is not None else None,
+                text_masked_embeddings,
+                flava_masked_output.text_output.to_tuple() if flava_masked_output.text_output is not None else None,
+                multimodal_masked_embeddings,
+                flava_masked_output.multimodal_output.to_tuple()
+                if flava_masked_output.multimodal_output is not None
+                else None,
+                mim_logits,
+                mlm_logits,
+                itm_logits,
+                logits_per_image,
+                logits_per_text,
+                mmm_image_logits,
+                mmm_text_logits,
+            )
+            if return_loss and not flava_losses.all_none():
+                output = (
+                    total_loss,
+                    flava_losses,
+                ) + output
+
+            # Filter None as transformer by default won't handle it
+            return tuple(x for x in output if x is not None)
+
+        return FlavaForPreTrainingOutput(
+            loss=total_loss,
+            loss_info=flava_losses,
+            image_embeddings=image_embeddings,
+            image_output=flava_output.image_output,
+            text_embeddings=text_embeddings,
+            text_output=flava_output.text_output,
+            multimodal_embeddings=flava_output.multimodal_embeddings,
+            multimodal_output=flava_output.multimodal_output,
+            image_masked_embeddings=image_masked_embeddings,
+            image_masked_output=flava_masked_output.image_output,
+            text_masked_embeddings=text_masked_embeddings,
+            text_masked_output=flava_masked_output.text_output,
+            multimodal_masked_embeddings=multimodal_masked_embeddings,
+            multimodal_masked_output=flava_masked_output.multimodal_output,
+            mim_logits=mim_logits,
+            mlm_logits=mlm_logits,
+            itm_logits=itm_logits,
+            contrastive_logits_per_image=logits_per_image,
+            contrastive_logits_per_text=logits_per_text,
+            mmm_image_logits=mmm_image_logits,
+            mmm_text_logits=mmm_text_logits,
+        )
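Editor's note: as orientation for the pre-training head added above, here is a minimal usage sketch (not part of the diff). The `facebook/flava-full` checkpoint name, the dummy image, and the specific processor flags are assumptions for illustration; any loss whose labels or masked inputs are missing is simply returned as None inside `loss_info`.

import torch
from PIL import Image
from transformers import FlavaProcessor, FlavaForPreTraining

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
model = FlavaForPreTraining.from_pretrained("facebook/flava-full")

image = Image.new("RGB", (224, 224))  # placeholder image
inputs = processor(
    images=image,
    text=["a photo of a cat"],
    return_codebook_pixels=True,  # codebook pixels feed the MIM/MMM-image targets
    return_image_mask=True,       # bool_masked_pos drives the masked image pass
    padding=True,
    return_tensors="pt",
)
inputs["input_ids_masked"] = inputs["input_ids"].clone()  # illustrative; real pre-training masks tokens

with torch.no_grad():
    outputs = model(**inputs, return_loss=True)

print(outputs.loss_info)  # FlavaLosses(mim=..., mlm=..., itm=..., global_contrastive=..., mmm_image=..., mmm_text=...)
print(outputs.loss)       # sum of the non-None entries, as computed in the code above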
evalkit_tf433/lib/python3.10/site-packages/transformers/models/flava/processing_flava.py
ADDED
@@ -0,0 +1,164 @@
+# coding=utf-8
+# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Image/Text processor class for FLAVA
+"""
+
+import warnings
+from typing import List, Optional, Union
+
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+
+
+class FlavaProcessor(ProcessorMixin):
+    r"""
+    Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.
+
+    [`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See the
+    [`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] for more information.
+
+    Args:
+        image_processor ([`FlavaImageProcessor`]): The image processor is a required input.
+        tokenizer ([`BertTokenizerFast`]): The tokenizer is a required input.
+    """
+    attributes = ["image_processor", "tokenizer"]
+    image_processor_class = "FlavaImageProcessor"
+    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
+
+    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+        feature_extractor = None
+        if "feature_extractor" in kwargs:
+            warnings.warn(
+                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+                " instead.",
+                FutureWarning,
+            )
+            feature_extractor = kwargs.pop("feature_extractor")
+
+        image_processor = image_processor if image_processor is not None else feature_extractor
+        if image_processor is None:
+            raise ValueError("You need to specify an `image_processor`.")
+        if tokenizer is None:
+            raise ValueError("You need to specify a `tokenizer`.")
+
+        super().__init__(image_processor, tokenizer)
+        self.current_processor = self.image_processor
+
+    def __call__(
+        self,
+        images: Optional[ImageInput] = None,
+        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+        add_special_tokens: bool = True,
+        padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Union[bool, str, TruncationStrategy] = False,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        pad_to_multiple_of: Optional[int] = None,
+        return_image_mask: Optional[bool] = None,
+        return_codebook_pixels: Optional[bool] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_offsets_mapping: bool = False,
+        return_length: bool = False,
+        verbose: bool = True,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        **kwargs,
+    ):
+        """
+        This method uses [`FlavaImageProcessor.__call__`] method to prepare image(s) for the model, and
+        [`BertTokenizerFast.__call__`] to prepare text for the model.
+
+        Please refer to the docstring of the above two methods for more information.
+        """
+
+        if text is None and images is None:
+            raise ValueError("You have to specify either text or images. Both cannot be none.")
+
+        if text is not None:
+            encoding = self.tokenizer(
+                text=text,
+                add_special_tokens=add_special_tokens,
+                padding=padding,
+                truncation=truncation,
+                max_length=max_length,
+                stride=stride,
+                pad_to_multiple_of=pad_to_multiple_of,
+                return_token_type_ids=return_token_type_ids,
+                return_attention_mask=return_attention_mask,
+                return_overflowing_tokens=return_overflowing_tokens,
+                return_special_tokens_mask=return_special_tokens_mask,
+                return_offsets_mapping=return_offsets_mapping,
+                return_length=return_length,
+                verbose=verbose,
+                return_tensors=return_tensors,
+                **kwargs,
+            )
+        if images is not None:
+            image_features = self.image_processor(
+                images,
+                return_image_mask=return_image_mask,
+                return_codebook_pixels=return_codebook_pixels,
+                return_tensors=return_tensors,
+                **kwargs,
+            )
+
+        if text is not None and images is not None:
+            encoding.update(image_features)
+            return encoding
+        elif text is not None:
+            return encoding
+        else:
+            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
+
+    def batch_decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+        refer to the docstring of this method for more information.
+        """
+        return self.tokenizer.batch_decode(*args, **kwargs)
+
+    def decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+        the docstring of this method for more information.
+        """
+        return self.tokenizer.decode(*args, **kwargs)
+
+    @property
+    def model_input_names(self):
+        tokenizer_input_names = self.tokenizer.model_input_names
+        image_processor_input_names = self.image_processor.model_input_names
+        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+    @property
+    def feature_extractor_class(self):
+        warnings.warn(
+            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+            FutureWarning,
+        )
+        return self.image_processor_class
+
+    @property
+    def feature_extractor(self):
+        warnings.warn(
+            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
+            FutureWarning,
+        )
+        return self.image_processor
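Editor's note: a small usage sketch of the processor defined above (not part of the diff; the checkpoint name is an assumption). It shows how the tokenizer and image-processor outputs are merged into a single BatchEncoding, and that text-only or image-only calls are also accepted.

from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
image = Image.new("RGB", (224, 224))  # placeholder image

batch = processor(images=image, text=["a photo of a cat"], padding=True, return_tensors="pt")
print(sorted(batch.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids

# Either modality alone is fine; passing neither raises the ValueError shown above.
text_only = processor(text=["hello world"], return_tensors="pt")
image_only = processor(images=image, return_tensors="pt")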
evalkit_tf433/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py
ADDED
@@ -0,0 +1,110 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_tf_available,
+    is_torch_available,
+    is_vision_available,
+)
+
+
+_import_structure = {
+    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
+}
+
+try:
+    if not is_vision_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
+    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_mobilevit"] = [
+        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "MobileViTForImageClassification",
+        "MobileViTForSemanticSegmentation",
+        "MobileViTModel",
+        "MobileViTPreTrainedModel",
+    ]
+
+try:
+    if not is_tf_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_tf_mobilevit"] = [
+        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "TFMobileViTForImageClassification",
+        "TFMobileViTForSemanticSegmentation",
+        "TFMobileViTModel",
+        "TFMobileViTPreTrainedModel",
+    ]
+
+if TYPE_CHECKING:
+    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
+
+    try:
+        if not is_vision_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
+        from .image_processing_mobilevit import MobileViTImageProcessor
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_mobilevit import (
+            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+            MobileViTForImageClassification,
+            MobileViTForSemanticSegmentation,
+            MobileViTModel,
+            MobileViTPreTrainedModel,
+        )
+
+    try:
+        if not is_tf_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_tf_mobilevit import (
+            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+            TFMobileViTForImageClassification,
+            TFMobileViTForSemanticSegmentation,
+            TFMobileViTModel,
+            TFMobileViTPreTrainedModel,
+        )
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
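Editor's note: a brief illustration (not from the diff) of what this lazy registration provides at runtime. Replacing the module in sys.modules with a _LazyModule means attribute access triggers the real submodule import, so optional backends are only needed when their classes are actually used; the config keyword is illustrative.

# Hypothetical usage sketch: attributes on the lazily-registered package resolve on first access.
import transformers.models.mobilevit as mobilevit

config = mobilevit.MobileViTConfig()       # imports configuration_mobilevit on demand
model = mobilevit.MobileViTModel(config)   # imports modeling_mobilevit on demand (requires torch)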
evalkit_tf433/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.71 kB).
evalkit_tf433/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc
ADDED
Binary file (7.6 kB).
evalkit_tf433/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc
ADDED
Binary file (8.73 kB).