ZTWHHH committed on
Commit
3695418
·
verified ·
1 Parent(s): 4649a86

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__init__.py +73 -0
  2. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc +0 -0
  3. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc +0 -0
  4. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc +0 -0
  5. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc +0 -0
  6. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc +0 -0
  7. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py +137 -0
  8. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py +178 -0
  9. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py +323 -0
  10. evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py +900 -0
  11. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__init__.py +102 -0
  12. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/__init__.cpython-310.pyc +0 -0
  13. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc +0 -0
  14. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc +0 -0
  15. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc +0 -0
  16. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc +0 -0
  17. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc +0 -0
  18. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc +0 -0
  19. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py +144 -0
  20. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py +243 -0
  21. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py +33 -0
  22. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py +320 -0
  23. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py +553 -0
  24. evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py +666 -0
  25. evalkit_tf437/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc +0 -0
  26. evalkit_tf437/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc +0 -0
  27. evalkit_tf437/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py +537 -0
  28. evalkit_tf437/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py +205 -0
  29. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__init__.py +77 -0
  30. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/__init__.cpython-310.pyc +0 -0
  31. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/configuration_fastspeech2_conformer.cpython-310.pyc +0 -0
  32. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  33. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_hifigan.cpython-310.pyc +0 -0
  34. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_model_with_hifigan.cpython-310.pyc +0 -0
  35. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/modeling_fastspeech2_conformer.cpython-310.pyc +0 -0
  36. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/tokenization_fastspeech2_conformer.cpython-310.pyc +0 -0
  37. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +488 -0
  38. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py +210 -0
  39. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_hifigan.py +134 -0
  40. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py +102 -0
  41. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +1686 -0
  42. evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +198 -0
  43. evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py +110 -0
  44. evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc +0 -0
  45. evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc +0 -0
  46. evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc +0 -0
  47. evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py +185 -0
  48. evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py +312 -0
  49. evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/feature_extraction_mobilevit.py +33 -0
  50. evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/image_processing_mobilevit.py +470 -0
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__init__.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Declares the public submodules/symbols of `transformers.models.bit` without
# importing them eagerly; the heavy imports are deferred via `_LazyModule` below.
_import_structure = {"configuration_bit": ["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig", "BitOnnxConfig"]}

# Modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bit"] = [
        "BIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BitForImageClassification",
        "BitModel",
        "BitPreTrainedModel",
        "BitBackbone",
    ]


# The image processor additionally requires the vision extras.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bit"] = ["BitImageProcessor"]


if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module in
    # the `else` branch resolves attribute access on demand.
    from .configuration_bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig, BitOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bit import (
            BIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BitBackbone,
            BitForImageClassification,
            BitModel,
            BitPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bit import BitImageProcessor

else:
    import sys

    # Replace this module object with a lazy proxy that imports the submodules
    # listed in `_import_structure` only when their attributes are first used.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc ADDED
Binary file (5.6 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc ADDED
Binary file (4.5 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc ADDED
Binary file (23.8 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BiT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
20
+
21
+
22
logger = logging.get_logger(__name__)

# Maps canonical checkpoint names to the URL of their hosted config.json on the Hub.
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}

29
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate a BiT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the BiT
    [google/bit-50](https://huggingface.co/google/bit-50) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embedding_size (`int`, *optional*, defaults to 64):
            Dimensionality (hidden size) for the embedding layer.
        hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
            Depth (number of layers) for each stage.
        layer_type (`str`, *optional*, defaults to `"preactivation"`):
            The layer to use, it can be either `"preactivation"` or `"bottleneck"`.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
            are supported.
        global_padding (`str`, *optional*):
            Padding strategy to use for the convolutional layers. Can be either `"valid"`, `"same"`, or `None`.
        num_groups (`int`, *optional*, defaults to 32):
            Number of groups used for the `BitGroupNormActivation` layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop path rate for the stochastic depth.
        embedding_dynamic_padding (`bool`, *optional*, defaults to `False`):
            Whether or not to make use of dynamic padding for the embedding layer.
        output_stride (`int`, *optional*, defaults to 32):
            The output stride of the model.
        width_factor (`int`, *optional*, defaults to 1):
            The width factor for the model.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import BitConfig, BitModel

    >>> # Initializing a BiT bit-50 style configuration
    >>> configuration = BitConfig()

    >>> # Initializing a model (with random weights) from the bit-50 style configuration
    >>> model = BitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=None,
        depths=None,
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Avoid mutable default arguments: materialize the documented list defaults
        # per-instance so configs never share (and can freely mutate) these lists.
        if hidden_sizes is None:
            hidden_sizes = [256, 512, 1024, 2048]
        if depths is None:
            depths = [3, 4, 6, 3]
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            # Padding strategy is stored upper-cased ("SAME"/"VALID"); anything else is rejected.
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        # One named entry per stage ("stem", "stage1", ...) for backbone feature selection.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert BiT checkpoints from the timm library."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+ from timm import create_model
27
+ from timm.data import resolve_data_config
28
+ from timm.data.transforms_factory import create_transform
29
+
30
+ from transformers import BitConfig, BitForImageClassification, BitImageProcessor
31
+ from transformers.image_utils import PILImageResampling
32
+ from transformers.utils import logging
33
+
34
+
35
# Emit INFO-level progress messages while converting.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
37
+
38
+
39
def get_config(model_name):
    """
    Build a `BitConfig` for the given timm checkpoint name, with ImageNet-1k
    id2label/label2id mappings loaded from the `huggingface/label-files` dataset repo.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    # Use a context manager so the downloaded label file is closed deterministically
    # (the previous `json.load(open(...))` leaked the file handle).
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
59
+
60
+
61
def rename_key(name):
    """Map a timm BiT parameter name onto the HF `BitForImageClassification` layout."""
    # Straight substring substitutions, applied in order.
    substitutions = (
        ("stem.conv", "bit.embedder.convolution"),
        ("blocks", "layers"),
        ("head.fc", "classifier.1"),
    )
    for old, new in substitutions:
        if old in name:
            name = name.replace(old, new)

    # Top-level norm parameters live directly under the `bit` prefix.
    if name.startswith("norm"):
        name = "bit." + name

    # Everything not already routed to the embedder/norm/classifier belongs to the encoder.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
74
+
75
+
76
# Conversion results are verified on a standard COCO test image of two cats.
def prepare_img():
    """Download and return the COCO val2017 cats image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw)
81
+
82
+
83
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our BiT structure.

    Args:
        model_name (`str`): Name of the BiT model in timm, e.g. `"resnetv2_50x1_bitm"`.
        pytorch_dump_folder_path (`str` or `None`): If set, directory where the converted
            model and image processor are saved.
        push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the converted
            model and processor to the `ybelkada` namespace on the Hub.

    Raises:
        AssertionError: If the processor's pixel values or the converted model's logits do
            not match the original timm pipeline.
    """

    # define default BiT configuration
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, renaming keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # classification-head tensors carry trailing singleton dims in timm; squeeze them
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring the timm preprocessing pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
157
+
158
+
159
if __name__ == "__main__":
    # CLI entry point: convert a timm BiT checkpoint and optionally save/push it.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for BiT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ convert_to_rgb,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ OPENAI_CLIP_MEAN,
30
+ OPENAI_CLIP_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ )
40
+ from ...utils import TensorType, is_vision_available, logging
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+
46
+ if is_vision_available():
47
+ import PIL
48
+
49
+
50
+ class BitImageProcessor(BaseImageProcessor):
51
+ r"""
52
+ Constructs a BiT image processor.
53
+
54
+ Args:
55
+ do_resize (`bool`, *optional*, defaults to `True`):
56
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
57
+ `do_resize` in the `preprocess` method.
58
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
59
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
60
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
61
+ method.
62
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
63
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
64
+ do_center_crop (`bool`, *optional*, defaults to `True`):
65
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
66
+ `preprocess` method.
67
+ crop_size (`Dict[str, int]` *optional*, defaults to 224):
68
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
69
+ method.
70
+ do_rescale (`bool`, *optional*, defaults to `True`):
71
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
72
+ the `preprocess` method.
73
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
74
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
75
+ method.
76
+ do_normalize:
77
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
78
+ image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
79
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
80
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
81
+ image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
82
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
83
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
84
+ Can be overridden by the `image_std` parameter in the `preprocess` method.
85
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
86
+ Whether to convert the image to RGB.
87
+ """
88
+
89
+ model_input_names = ["pixel_values"]
90
+
91
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Fall back to the documented defaults, then normalize the size dicts to the
        # canonical {"shortest_edge": ...} / {"height": ..., "width": ...} forms.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Normalization defaults to the OpenAI CLIP mean/std constants.
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
123
+
124
    # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image. Must contain either a `"shortest_edge"` key or both `"height"` and
                `"width"` keys.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.

        Raises:
            ValueError: If `size` contains neither `"shortest_edge"` nor both `"height"` and `"width"`.
        """
        default_to_square = True
        if "shortest_edge" in size:
            # Aspect-preserving resize: only the target shortest edge is pinned.
            size = size["shortest_edge"]
            default_to_square = False
        elif "height" in size and "width" in size:
            # Exact (height, width) target.
            size = (size["height"], size["width"])
        else:
            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")

        output_size = get_resize_output_image_size(
            image,
            size=size,
            default_to_square=default_to_square,
            input_data_format=input_data_format,
        )
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
173
+
174
def preprocess(
    self,
    images: ImageInput,
    do_resize: Optional[bool] = None,
    size: Optional[Dict[str, int]] = None,
    resample: Optional[PILImageResampling] = None,
    do_center_crop: Optional[bool] = None,
    # NOTE: annotated as a size dict (not int) — the value is normalized via get_size_dict below.
    crop_size: Optional[Dict[str, int]] = None,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    do_convert_rgb: Optional[bool] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> BatchFeature:
    """
    Preprocess an image or batch of images.

    Args:
        images (`ImageInput`):
            Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
            passing in images with pixel values between 0 and 1, set `do_rescale=False`.
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the image.
        size (`Dict[str, int]`, *optional*, defaults to `self.size`):
            Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio.
        resample (`int`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
            has an effect if `do_resize` is set to `True`.
        do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
            Whether to center crop the image.
        crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
            Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the image.
        rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
            Rescale factor to rescale the image by if `do_rescale` is set to `True`.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the image.
        image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
            Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
        image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
            Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
            `True`.
        do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
            Whether to convert the image to RGB.
        return_tensors (`str` or `TensorType`, *optional*):
            The type of tensors to return. Can be one of:
            - Unset: Return a list of `np.ndarray`.
            - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
            - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
            - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image. If unset, the channel dimension format is inferred
            from the input image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

    Returns:
        `BatchFeature`: A `BatchFeature` with a single `"pixel_values"` key holding the processed images.
    """
    # Fall back to the instance-level defaults for any argument not explicitly provided.
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, param_name="size", default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

    images = make_list_of_images(images)

    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )

    # Validate that each enabled transformation has the parameters it needs.
    if do_resize and size is None:
        raise ValueError("Size must be specified if do_resize is True.")

    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True.")

    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")

    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")

    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        images = [convert_to_rgb(image) for image in images]

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if is_scaled_image(images[0]) and do_rescale:
        logger.warning_once(
            "It looks like you are trying to rescale already rescaled images. If the input"
            " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
        )

    if input_data_format is None:
        # We assume that all images have the same channel dimension format.
        input_data_format = infer_channel_dimension_format(images[0])

    if do_resize:
        images = [
            self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
            for image in images
        ]

    if do_center_crop:
        images = [
            self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
        ]

    if do_rescale:
        images = [
            self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
            for image in images
        ]

    if do_normalize:
        images = [
            self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
            for image in images
        ]

    images = [
        to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
    ]

    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py ADDED
@@ -0,0 +1,900 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BiT model. Also supports backbone for ViT hybrid."""
16
+
17
+ import collections
18
+ import math
19
+ from typing import Optional, Tuple
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import Tensor, nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BackboneOutput,
30
+ BaseModelOutputWithNoAttention,
31
+ BaseModelOutputWithPoolingAndNoAttention,
32
+ ImageClassifierOutputWithNoAttention,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from ...utils.backbone_utils import BackboneMixin
43
+ from .configuration_bit import BitConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ # General docstring
49
+ _CONFIG_FOR_DOC = "BitConfig"
50
+
51
+ # Base docstring
52
+ _CHECKPOINT_FOR_DOC = "google/bit-50"
53
+ _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
54
+
55
+ # Image classification docstring
56
+ _IMAGE_CLASS_CHECKPOINT = "google/bit-50"
57
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
58
+
59
+ BIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
60
+ "google/bit-50",
61
+ # See all BiT models at https://huggingface.co/models?filter=bit
62
+ ]
63
+
64
+
65
def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]:
    r"""
    Resolve a padding specification into a concrete padding amount plus a flag that says whether
    runtime ("dynamic") padding is required instead.

    Args:
        padding (Union[`str`, `int`], *optional*):
            Padding value, can be either `"same"`, `"valid"`. If a different value is provided the default padding from
            PyTorch is used.
        kernel_size (`int`, *optional*, defaults to 7):
            Kernel size of the convolution layers.
        stride (`int`, *optional*, defaults to 1):
            Stride value of the convolution layers.
        dilation (`int`, *optional*, defaults to 1):
            Dilation value of the convolution layers.

    Returns:
        `(padding, dynamic)` where `dynamic` is True only for TF-style "SAME" padding that cannot be
        expressed as a static symmetric pad.
    """
    # PyTorch-style symmetric padding that keeps the spatial size when stride == 1.
    symmetric = ((stride - 1) + dilation * (kernel_size - 1)) // 2

    if padding is None:
        return symmetric, False

    # Explicit numeric/tuple paddings are passed through untouched.
    if not isinstance(padding, str):
        return padding, False

    mode = padding.lower()
    if mode == "valid":
        # 'VALID' padding, same as padding=0
        return 0, False
    if mode == "same":
        if stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0:
            # Static case, no extra overhead.
            return symmetric, False
        # Dynamic 'SAME' padding, has runtime/GPU memory overhead.
        return 0, True
    # Any other string falls back to PyTorch-style symmetric padding.
    return symmetric, False
104
+
105
+
106
class WeightStandardizedConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization. Includes TensorFlow compatible SAME padding. Used for ViT Hybrid model.

    Paper: [Micro-Batch Training with Batch-Channel Normalization and Weight
    Standardization](https://arxiv.org/abs/1903.10520v2)
    """

    def __init__(
        self,
        in_channel,
        out_channels,
        kernel_size,
        stride=1,
        padding="SAME",
        dilation=1,
        groups=1,
        bias=False,
        eps=1e-6,
    ):
        # Translate the (possibly string) padding spec into a static pad + dynamic-pad flag.
        resolved_padding, needs_dynamic_pad = get_padding_value(
            padding, kernel_size, stride=stride, dilation=dilation
        )
        super().__init__(
            in_channel,
            out_channels,
            kernel_size,
            stride=stride,
            padding=resolved_padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )
        # Dynamic TF-"SAME" padding is applied explicitly before the convolution when needed.
        self.pad = DynamicPad2d(kernel_size, stride, dilation) if needs_dynamic_pad else None
        self.eps = eps

    def forward(self, hidden_state):
        if self.pad is not None:
            hidden_state = self.pad(hidden_state)
        # Standardize the kernel per output channel (zero mean, unit variance) via batch_norm
        # over the flattened weight, then run a plain convolution with the standardized kernel.
        standardized_weight = nn.functional.batch_norm(
            self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps
        ).reshape_as(self.weight)
        return nn.functional.conv2d(
            hidden_state, standardized_weight, self.bias, self.stride, self.padding, self.dilation, self.groups
        )
152
+
153
+
154
class BitGroupNormActivation(nn.GroupNorm):
    r"""
    A module that combines group normalization with an activation function.
    """

    def __init__(self, config, num_channels, eps=1e-5, affine=True, apply_activation=True):
        super().__init__(config.num_groups, num_channels, eps=eps, affine=affine)
        # When no activation is requested, fall back to a no-op so forward stays uniform.
        self.activation = ACT2FN[config.hidden_act] if apply_activation else nn.Identity()

    def forward(self, hidden_state):
        normalized = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps)
        return self.activation(normalized)
170
+
171
+
172
class DynamicPad2d(nn.Module):
    r"""
    A module that wraps dynamic padding of any input, given the parameters of the convolutional layer and the input
    hidden states.
    """

    def __init__(self, kernel_size, stride, dilation, value=0):
        super().__init__()
        # Normalize scalar arguments to (height, width) pairs.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if isinstance(stride, int):
            stride = (stride, stride)
        if isinstance(dilation, int):
            dilation = (dilation, dilation)

        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.value = value

        # Amount of total padding needed along one dimension for TF-style "SAME" output size.
        def compute_padding(x, kernel_size, stride, dilation):
            return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)

        self.compute_padding = compute_padding

    def __call__(self, input):
        height, width = input.size()[-2:]

        pad_h = self.compute_padding(height, self.kernel_size[0], self.stride[0], self.dilation[0])
        pad_w = self.compute_padding(width, self.kernel_size[1], self.stride[1], self.dilation[1])

        if pad_h > 0 or pad_w > 0:
            # Split each total pad between the two sides, putting the extra pixel on the far side.
            input = nn.functional.pad(
                input,
                [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2],
                value=self.value,
            )
        return input
221
+
222
+
223
class BitMaxPool2d(nn.MaxPool2d):
    """Tensorflow like 'SAME' wrapper for 2D max pooling"""

    def __init__(
        self,
        kernel_size: int,
        stride=None,
        dilation=1,
        ceil_mode=False,
        padding=(0, 0),
        padding_value=0,
        use_dynamic_padding=True,
    ):
        # Promote scalars to (h, w) pairs; iterables are kept as given.
        def as_pair(value):
            return value if isinstance(value, collections.abc.Iterable) else (value, value)

        kernel_size = as_pair(kernel_size)
        stride = as_pair(stride)
        dilation = as_pair(dilation)
        super().__init__(kernel_size, stride, padding, dilation, ceil_mode)
        # Dynamic padding emulates TF "SAME"; otherwise padding is handled by max_pool2d itself.
        self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value) if use_dynamic_padding else nn.Identity()

    def forward(self, hidden_states):
        padded = self.pad(hidden_states)
        return nn.functional.max_pool2d(
            padded, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode
        )
250
+
251
+
252
class BitEmbeddings(nn.Module):
    """
    BiT Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: BitConfig):
        super().__init__()

        self.convolution = WeightStandardizedConv2d(
            config.num_channels,
            config.embedding_size,
            kernel_size=7,
            stride=2,
            eps=1e-8,
            padding=config.global_padding,
        )

        self.pooler = BitMaxPool2d(kernel_size=3, stride=2, use_dynamic_padding=config.embedding_dynamic_padding)

        # Use the same padding strategy as convolutional layers: with global "SAME" padding the
        # pooler pads dynamically itself; otherwise pad one pixel on every side before pooling.
        uses_same_padding = config.global_padding is not None and config.global_padding.upper() == "SAME"
        self.pad = nn.Identity() if uses_same_padding else nn.ConstantPad2d(padding=(1, 1, 1, 1), value=0.0)

        # Pre-activation variants normalize inside the residual blocks, not in the stem.
        if config.layer_type == "preactivation":
            self.norm = nn.Identity()
        else:
            self.norm = BitGroupNormActivation(config, num_channels=config.embedding_size)

        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        if pixel_values.shape[1] != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        embedding = self.convolution(pixel_values)
        embedding = self.pad(embedding)
        embedding = self.norm(embedding)
        return self.pooler(embedding)
300
+
301
+
302
+ # Copied from transformers.models.convnext.modeling_convnext.drop_path
303
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    # Inference or a zero rate: identity.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dimensions.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=input.dtype, device=input.device)
    mask.floor_()  # binarize to 0/1
    # Scale surviving paths by 1/keep_prob so the expectation is unchanged.
    return input.div(keep_prob) * mask
321
+
322
+
323
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Bit
324
class BitDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional form; only active while the module is in training mode.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f"p={self.drop_prob}"
336
+
337
+
338
def make_div(value, divisor=8):
    """Round `value` to the nearest multiple of `divisor` (never below `divisor` itself),
    bumping up one step if rounding would shrink the value by more than 10%."""
    rounded = max(divisor, int(value + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * value:
        rounded += divisor
    return rounded
344
+
345
+
346
class BitPreActivationBottleneckLayer(nn.Module):
    """Pre-activation (v2) bottleneck block.
    Follows the implementation of "Identity Mappings in Deep Residual Networks":
    https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua

    Except it puts the stride on 3x3 conv when available.
    """

    def __init__(
        self,
        config,
        in_channels,
        out_channels=None,
        bottle_ratio=0.25,
        stride=1,
        dilation=1,
        first_dilation=None,
        groups=1,
        drop_path_rate=0.0,
        is_first_layer=False,
    ):
        super().__init__()

        first_dilation = first_dilation or dilation
        out_channels = out_channels or in_channels
        mid_channels = make_div(out_channels * bottle_ratio)

        # Only the first layer of a stage changes spatial/channel dims, so only it needs a
        # projection on the shortcut branch.
        self.downsample = (
            BitDownsampleConv(config, in_channels, out_channels, stride=stride, preact=True)
            if is_first_layer
            else None
        )

        self.norm1 = BitGroupNormActivation(config, in_channels)
        self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-8, padding=config.global_padding)

        self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
        self.conv2 = WeightStandardizedConv2d(
            mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-8, padding=config.global_padding
        )

        self.norm3 = BitGroupNormActivation(config, mid_channels)
        self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-8, padding=config.global_padding)

        self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()

    def forward(self, hidden_states):
        # Pre-activation: normalize first; the downsample (if any) consumes the normalized input.
        preactivated = self.norm1(hidden_states)

        shortcut = self.downsample(preactivated) if self.downsample is not None else hidden_states

        # Residual branch: 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand.
        residual = self.conv1(preactivated)
        residual = self.conv2(self.norm2(residual))
        residual = self.conv3(self.norm3(residual))
        residual = self.drop_path(residual)
        return residual + shortcut
412
+
413
+
414
class BitBottleneckLayer(nn.Module):
    """Non Pre-activation bottleneck block, equivalent to V1.5/V1b bottleneck. Used for ViT Hybrid."""

    def __init__(
        self,
        config,
        in_channels,
        out_channels=None,
        bottle_ratio=0.25,
        stride=1,
        dilation=1,
        first_dilation=None,
        groups=1,
        drop_path_rate=0.0,
        is_first_layer=False,
    ):
        super().__init__()
        first_dilation = first_dilation or dilation
        out_channels = out_channels or in_channels
        mid_channels = make_div(out_channels * bottle_ratio)

        # Projection shortcut only on the first layer of a stage.
        self.downsample = (
            BitDownsampleConv(config, in_channels, out_channels, stride=stride, preact=False)
            if is_first_layer
            else None
        )

        self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-8, padding=config.global_padding)
        self.norm1 = BitGroupNormActivation(config, num_channels=mid_channels)
        self.conv2 = WeightStandardizedConv2d(
            mid_channels,
            mid_channels,
            3,
            stride=stride,
            dilation=first_dilation,
            groups=groups,
            eps=1e-8,
            padding=config.global_padding,
        )
        self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
        self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-8, padding=config.global_padding)
        # No activation on the last norm: the activation is applied after adding the shortcut.
        self.norm3 = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
        self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()

        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        shortcut = self.downsample(hidden_states) if self.downsample is not None else hidden_states

        # Residual branch: 1x1 reduce -> 3x3 (stride/dilation) -> 1x1 expand, norm after each conv.
        residual = self.norm1(self.conv1(hidden_states))
        residual = self.norm2(self.conv2(residual))
        residual = self.norm3(self.conv3(residual))
        residual = self.drop_path(residual)
        return self.activation(residual + shortcut)
485
+
486
+
487
class BitDownsampleConv(nn.Module):
    """1x1 projection applied on the shortcut branch to match channels and stride."""

    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        stride=1,
        preact=True,
    ):
        super().__init__()
        self.conv = WeightStandardizedConv2d(
            in_channels, out_channels, 1, stride=stride, eps=1e-8, padding=config.global_padding
        )
        # Pre-activation blocks normalize before the projection, so no extra norm is needed here.
        if preact:
            self.norm = nn.Identity()
        else:
            self.norm = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)

    def forward(self, x):
        return self.norm(self.conv(x))
508
+
509
+
510
class BitStage(nn.Module):
    """
    A ResNet v2 stage composed by stacked layers.
    """

    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        stride,
        dilation,
        depth,
        bottle_ratio=0.25,
        layer_dropout=None,
    ):
        super().__init__()

        first_dilation = 1 if dilation in (1, 2) else 2

        # Pick the layer implementation for this config.
        layer_cls = BitBottleneckLayer if config.layer_type == "bottleneck" else BitPreActivationBottleneckLayer

        self.layers = nn.Sequential()
        current_in_channels = in_channels
        for layer_idx in range(depth):
            stride, drop_path_rate, is_first_layer = self._get_updated_hyperparameters(
                layer_idx, stride, layer_dropout
            )
            self.layers.add_module(
                str(layer_idx),
                layer_cls(
                    config,
                    current_in_channels,
                    out_channels,
                    stride=stride,
                    dilation=dilation,
                    bottle_ratio=bottle_ratio,
                    first_dilation=first_dilation,
                    drop_path_rate=drop_path_rate,
                    is_first_layer=is_first_layer,
                ),
            )
            # After the first layer the channel count and dilation settle.
            current_in_channels = out_channels
            first_dilation = dilation

    def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout):
        r"""
        Get the new hyper-parameters with respect to the previous ones and the index of the current layer.
        """
        drop_path_rate = layer_dropout[layer_idx] if layer_dropout else 0.0
        # Only the first layer of a stage may stride; the rest keep stride 1.
        if layer_idx != 0:
            stride = 1
        return stride, drop_path_rate, layer_idx == 0

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
582
+
583
+
584
class BitEncoder(nn.Module):
    """Stacks the BiT stages and optionally collects the hidden state after each one."""

    def __init__(self, config: BitConfig):
        super().__init__()
        self.stages = nn.ModuleList([])

        prev_chs = config.embedding_size

        # These needs to stay hardcoded
        current_stride = 4
        dilation = 1

        # Stochastic-depth rate grows linearly over all layers; split the schedule per stage.
        layer_dropouts = [
            x.tolist()
            for x in torch.Tensor(np.linspace(0, config.drop_path_rate, sum(config.depths))).split(config.depths)
        ]

        for stage_idx, (current_depth, current_hidden_size, layer_dropout) in enumerate(
            zip(config.depths, config.hidden_sizes, layer_dropouts)
        ):
            out_channels, stride, dilation = self._get_updated_hyperparameters(
                stage_idx, current_stride, current_hidden_size, dilation, config
            )
            self.stages.add_module(
                str(stage_idx),
                BitStage(
                    config,
                    prev_chs,
                    out_channels,
                    stride=stride,
                    dilation=dilation,
                    depth=current_depth,
                    layer_dropout=layer_dropout,
                ),
            )
            prev_chs = out_channels
            current_stride *= stride

    def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config):
        out_channels = make_div(current_hidden_size * config.width_factor)
        stride = 1 if stage_idx == 0 else 2
        # Once the requested output stride is reached, trade further striding for dilation.
        if current_stride >= config.output_stride:
            dilation *= stride
            stride = 1
        return out_channels, stride, dilation

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
652
+
653
+
654
class BitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BitConfig
    base_model_prefix = "bit"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        # He initialization for convolutions; unit scale / zero shift for normalization layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
            return
        if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
670
+
671
+
672
+ BIT_START_DOCSTRING = r"""
673
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
674
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
675
+ behavior.
676
+
677
+ Parameters:
678
+ config ([`BitConfig`]): Model configuration class with all the parameters of the model.
679
+ Initializing with a config file does not load the weights associated with the model, only the
680
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
681
+ """
682
+
683
# Shared documentation of the `forward` arguments, injected via
# `@add_start_docstrings_to_model_forward` on each model's forward method.
BIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BitImageProcessor.__call__`]
            for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
695
+
696
+
697
@add_start_docstrings(
    "The bare BiT model outputting raw features without any specific head on top.",
    BIT_START_DOCSTRING,
)
class BitModel(BitPreTrainedModel):
    """Bare BiT encoder: embeddings -> stages -> (optional) final norm -> global average pool."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embedder = BitEmbeddings(config)
        self.encoder = BitEncoder(config)
        # Pre-activation variants still need one final normalization after the last
        # stage; other layer types have already normalized inside their blocks.
        if config.layer_type == "preactivation":
            self.norm = BitGroupNormActivation(config, num_channels=config.hidden_sizes[-1])
        else:
            self.norm = nn.Identity()

        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        # Fall back to the config defaults when the caller did not specify.
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        embeddings = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embeddings, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = self.norm(encoder_outputs[0])
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
755
+
756
+
757
@add_start_docstrings(
    """
    BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    BIT_START_DOCSTRING,
)
class BitForImageClassification(BitPreTrainedModel):
    """BiT encoder followed by a flatten + linear classification head."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bit = BitModel(config)
        # classification head
        head = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.classifier = nn.Sequential(nn.Flatten(), head)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels / label dtype and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
832
+
833
+
834
@add_start_docstrings(
    """
    BiT backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    BIT_START_DOCSTRING,
)
class BitBackbone(BitPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.bit = BitModel(config)
        # One feature dimension per stage name: the stem embedding plus each residual stage.
        self.num_features = [config.embedding_size] + config.hidden_sizes

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("google/bit-50")
        >>> model = AutoBackbone.from_pretrained("google/bit-50")

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # All hidden states are always requested internally so the stages selected in
        # `out_features` can be picked out below, regardless of the caller's flags.
        outputs = self.bit(pixel_values, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__init__.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
# The configuration is always importable; the other entries are added only
# when their optional dependencies are available.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

# Image processing classes need the vision extras (e.g. Pillow).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

# PyTorch modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

# TensorFlow modeling classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports; at runtime this branch
    # is skipped and the lazy module below resolves names on first access.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel


else:
    import sys

    # Replace this module with a lazy loader so heavy deps load only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.58 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc ADDED
Binary file (6.08 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc ADDED
Binary file (7.13 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc ADDED
Binary file (1.02 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc ADDED
Binary file (22.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ConvNeXT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
31
+ "facebook/convnext-tiny-224": "https://huggingface.co/facebook/convnext-tiny-224/resolve/main/config.json",
32
+ # See all ConvNeXT models at https://huggingface.co/models?filter=convnext
33
+ }
34
+
35
+
36
class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate an
    ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the ConvNeXT
    [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, optional, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, optional, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`List[int]`, *optional*, defaults to [96, 192, 384, 768]):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to [3, 3, 9, 3]):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
            The initial value for the layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each input image.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import ConvNextConfig, ConvNextModel

    >>> # Initializing a ConvNext convnext-tiny-224 style configuration
    >>> configuration = ConvNextConfig()

    >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
    >>> model = ConvNextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "convnext"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        layer_scale_init_value=1e-6,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Mutable defaults are materialized here rather than in the signature.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Stage names drive backbone feature selection: the stem plus one entry per depth.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
129
+
130
+
131
class ConvNextOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvNeXT image models."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single 4D image tensor; every axis is exported as a dynamic dimension.
        dynamic_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", dynamic_axes)])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-5
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ConvNext checkpoints from the original repository.
16
+
17
+ URL: https://github.com/facebookresearch/ConvNeXt"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from pathlib import Path
23
+
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import hf_hub_download
27
+ from PIL import Image
28
+
29
+ from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor
30
+ from transformers.utils import logging
31
+
32
+
33
+ logging.set_verbosity_info()
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
def get_convnext_config(checkpoint_url):
    """Derive a `ConvNextConfig` and the expected logit shape from a checkpoint URL.

    The original Facebook Research checkpoints encode the model size (tiny/small/base/
    large/xlarge) and the label set (ImageNet-1k vs -22k) in the file name.

    Args:
        checkpoint_url: URL of an original ConvNeXT checkpoint.

    Returns:
        Tuple of (`ConvNextConfig`, expected logits shape).

    Raises:
        ValueError: If the model size cannot be inferred from the URL.
    """
    config = ConvNextConfig()

    # Bug fix: the original non-exclusive `if` chain left `depths`/`hidden_sizes`
    # unbound (NameError) for unrecognized URLs. "xlarge" is checked before
    # "large" because the latter is a substring of the former.
    if "tiny" in checkpoint_url:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    elif "small" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    elif "base" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
    elif "xlarge" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
    elif "large" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
    else:
        raise ValueError(f"Could not infer the model size from checkpoint URL: {checkpoint_url}")

    if "1k" in checkpoint_url:
        num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        num_labels = 21841
        filename = "imagenet-22k-id2label.json"
        expected_shape = (1, 21841)

    repo_id = "huggingface/label-files"
    config.num_labels = num_labels
    # `with` ensures the label file handle is closed (the original leaked it).
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    if "1k" not in checkpoint_url:
        # this dataset contains 21843 labels but the model only has 21841
        # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
        del id2label[9205]
        del id2label[15027]
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.hidden_sizes = hidden_sizes
    config.depths = depths

    return config, expected_shape
80
+
81
+
82
def rename_key(name):
    """Translate an original ConvNeXt checkpoint key into the HF Transformers naming scheme."""
    # The stem (first downsampling block) becomes the embeddings module.
    if "downsample_layers.0.0" in name:
        name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
    if "downsample_layers.0.1" in name:
        name = name.replace("downsample_layers.0.1", "embeddings.norm")  # we rename to layernorm later on
    # The remaining downsampling blocks live inside their respective stage.
    for stage_idx in (1, 2, 3):
        for layer_idx in (0, 1):
            source = f"downsample_layers.{stage_idx}.{layer_idx}"
            if source in name:
                name = name.replace(source, f"stages.{stage_idx}.downsampling_layer.{layer_idx}")
    if "stages" in name and "downsampling_layer" not in name:
        # "stages.N.M...." should become "stages.N.layers.M....".
        prefix_len = len("stages.0")
        name = name[:prefix_len] + ".layers" + name[prefix_len:]
    if "stages" in name:
        name = name.replace("stages", "encoder.stages")
    if "norm" in name:
        name = name.replace("norm", "layernorm")
    if "gamma" in name:
        name = name.replace("gamma", "layer_scale_parameter")
    if "head" in name:
        name = name.replace("head", "classifier")

    return name
112
+
113
+
114
+ # We will verify our results on an image of cute cats
115
def prepare_img():
    """Download and return the standard COCO validation image (two cats) used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)
119
+
120
+
121
@torch.no_grad()
def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ConvNext structure.

    Downloads the original checkpoint from `checkpoint_url`, renames its keys to the
    HF layout, verifies the converted model against hard-coded reference logits, then
    saves the model and image processor to `pytorch_dump_folder_path` and pushes them
    to the Hub.
    """

    # define ConvNext configuration based on URL
    config, expected_shape = get_convnext_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # add prefix to all keys except classifier head
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if not key.startswith("classifier"):
            key = "convnext." + key
        state_dict[key] = val

    # load HuggingFace model
    model = ConvNextForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image, prepared by ConvNextImageProcessor
    size = 224 if "224" in checkpoint_url else 384
    image_processor = ConvNextImageProcessor(size=size)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    logits = model(pixel_values).logits

    # note: the logits below were obtained without center cropping
    if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth":
        expected_logits = torch.tensor([-0.1210, -0.6605, 0.1918])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth":
        expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth":
        expected_logits = torch.tensor([0.4525, 0.7539, 0.0308])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth":
        expected_logits = torch.tensor([0.3561, 0.6350, -0.0384])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth":
        expected_logits = torch.tensor([0.4174, -0.0989, 0.1489])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth":
        expected_logits = torch.tensor([0.2513, -0.1349, -0.1613])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth":
        expected_logits = torch.tensor([1.2980, 0.3631, -0.1198])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth":
        expected_logits = torch.tensor([1.2963, 0.1227, 0.1723])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth":
        expected_logits = torch.tensor([1.7956, 0.8390, 0.2820])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth":
        expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth":
        expected_logits = torch.tensor([-0.5672, -0.0730, -0.4348])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth":
        expected_logits = torch.tensor([0.2681, 0.2365, 0.6246])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth":
        expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth":
        expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth":
        expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444])
    else:
        raise ValueError(f"Unknown URL: {checkpoint_url}")

    # Sanity-check the conversion against reference logits from the original implementation.
    assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3)
    assert logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    print("Pushing model to the hub...")
    # Build the Hub model name from size / resolution / label-set markers in the URL.
    # NOTE: "xlarge" must be tested before "large" because "large" is a substring.
    model_name = "convnext"
    if "tiny" in checkpoint_url:
        model_name += "-tiny"
    elif "small" in checkpoint_url:
        model_name += "-small"
    elif "base" in checkpoint_url:
        model_name += "-base"
    elif "xlarge" in checkpoint_url:
        model_name += "-xlarge"
    elif "large" in checkpoint_url:
        model_name += "-large"
    if "224" in checkpoint_url:
        model_name += "-224"
    elif "384" in checkpoint_url:
        model_name += "-384"
    if "22k" in checkpoint_url and "1k" not in checkpoint_url:
        model_name += "-22k"
    if "22k" in checkpoint_url and "1k" in checkpoint_url:
        model_name += "-22k-1k"

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
        organization="nielsr",
        commit_message="Add model",
    )
223
+
224
+
225
# CLI entry point: convert one original ConvNeXT checkpoint and save/push the HF version.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
        type=str,
        help="URL of the original ConvNeXT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )

    args = parser.parse_args()
    convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for ConvNeXT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_convnext import ConvNextImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
class ConvNextFeatureExtractor(ConvNextImageProcessor):
    """Deprecated alias of `ConvNextImageProcessor`; emits a `FutureWarning` on instantiation."""

    def __init__(self, *args, **kwargs) -> None:
        message = (
            "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ConvNextImageProcessor instead."
        )
        warnings.warn(message, FutureWarning)
        super().__init__(*args, **kwargs)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for ConvNeXT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ center_crop,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ IMAGENET_STANDARD_MEAN,
30
+ IMAGENET_STANDARD_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ )
40
+ from ...utils import TensorType, is_vision_available, logging
41
+
42
+
43
+ if is_vision_available():
44
+ import PIL
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Constructs a ConvNeXT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overriden
            by `do_resize` in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
            Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
            resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
            be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to
            `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
            be overriden by `size` in the `preprocess` method.
        crop_pct (`float` *optional*, defaults to 224 / 256):
            Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be
            overriden by `crop_pct` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overriden by `resample` in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overriden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overriden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
                `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
                Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
                after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
            crop_pct (`float`):
                Percentage of the image to crop. Only has an effect if size < 384.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred from the input
                image.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(
                image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format
            )
            image = resize(
                image=image,
                size=resize_size,
                resample=resample,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(
                image=image,
                size=(shortest_edge, shortest_edge),
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image,
                size=(shortest_edge, shortest_edge),
                resample=resample,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
                is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
                image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
                `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
            crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
                Percentage of the image to crop if size < 384.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parentheses are required here: without them the expression parses as
        # `(do_resize and size is None) or (resample is None)`, which raises even when
        # `do_resize` is False and fails to guard `size` and `resample` together.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_resize:
            images = [
                self.resize(
                    image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
                )
                for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py ADDED
@@ -0,0 +1,553 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ConvNext model."""
16
+
17
+
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BackboneOutput,
28
+ BaseModelOutputWithNoAttention,
29
+ BaseModelOutputWithPoolingAndNoAttention,
30
+ ImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import (
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ replace_return_docstrings,
39
+ )
40
+ from ...utils.backbone_utils import BackboneMixin
41
+ from .configuration_convnext import ConvNextConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
# General docstring
_CONFIG_FOR_DOC = "ConvNextConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
# Shape of the last hidden state produced by the base-docstring example checkpoint,
# consumed by `add_code_sample_docstrings(expected_output=...)` below.
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/convnext-tiny-224"
# Expected top-1 label of the classification docstring example.
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/convnext-tiny-224",
    # See all ConvNext models at https://huggingface.co/models?filter=convnext
]
61
+
62
+
63
+ # Copied from transformers.models.beit.modeling_beit.drop_path
64
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks.

    Comment by Ross Wightman: this matches the DropConnect implementation created for EfficientNet-style
    networks, but the name 'Drop Connect' refers to a different form of dropout from a separate paper, so
    'drop path' is used here instead (see https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956).

    Args:
        input: tensor whose first dimension indexes samples.
        drop_prob: probability of zeroing an entire sample's residual path.
        training: only drop during training; inference is a no-op.
    """
    # Inference, or a zero rate, leaves the input untouched.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over every remaining dimension.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    keep_mask = torch.rand(mask_shape, dtype=input.dtype, device=input.device).add_(keep_prob).floor_()
    # Scale surviving paths by 1/keep_prob so the expected activation is unchanged.
    return input.div(keep_prob) * keep_mask
82
+
83
+
84
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNext
85
class ConvNextDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        # Stored rate; the module's `training` flag decides whether it is applied.
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional implementation above.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f"p={self.drop_prob}"
97
+
98
+
99
class ConvNextLayerNorm(nn.Module):
    r"""LayerNorm that supports two input layouts.

    `channels_last` expects inputs shaped (batch_size, height, width, channels) and delegates to
    `torch.nn.functional.layer_norm`. `channels_first` expects (batch_size, channels, height, width)
    and normalizes over the channel dimension by hand, in float32 for numerical stability.
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError(f"Unsupported data format: {self.data_format}")
        self.normalized_shape = (normalized_shape,)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.data_format == "channels_last":
            return torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        # channels_first: compute the statistics over dim 1 in float32, then cast back.
        original_dtype = x.dtype
        features = x.float()
        mean = features.mean(1, keepdim=True)
        variance = (features - mean).pow(2).mean(1, keepdim=True)
        features = (features - mean) / torch.sqrt(variance + self.eps)
        features = features.to(dtype=original_dtype)
        # Affine parameters are broadcast over the spatial dimensions.
        return self.weight[:, None, None] * features + self.bias[:, None, None]
127
+
128
+
129
class ConvNextEmbeddings(nn.Module):
    """Patchify stem: a strided convolution followed by a channels-first layer norm.

    This class is comparable to (and inspired by) the SwinEmbeddings class found in
    src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config):
        super().__init__()
        # Non-overlapping patches: kernel size equals stride.
        self.patch_embeddings = nn.Conv2d(
            config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
        )
        self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
        self.num_channels = config.num_channels

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        if pixel_values.shape[1] != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        return self.layernorm(self.patch_embeddings(pixel_values))
151
+
152
+
153
class ConvNextLayer(nn.Module):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv],
    all in (N, C, H, W); (2) [DwConv, permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU,
    Linear], then permute back. The authors used (2) as they find it slightly faster in PyTorch.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
        self.layernorm = ConvNextLayerNorm(dim, eps=1e-6)
        # Pointwise (1x1) convolutions, implemented as linear layers on the channels-last tensor.
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = ACT2FN[config.hidden_act]
        self.pwconv2 = nn.Linear(4 * dim, dim)
        if config.layer_scale_init_value > 0:
            self.layer_scale_parameter = nn.Parameter(
                config.layer_scale_init_value * torch.ones((dim)), requires_grad=True
            )
        else:
            self.layer_scale_parameter = None
        if drop_path > 0.0:
            self.drop_path = ConvNextDropPath(drop_path)
        else:
            self.drop_path = nn.Identity()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        residual = hidden_states
        features = self.dwconv(hidden_states)
        features = features.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        features = self.pwconv2(self.act(self.pwconv1(self.layernorm(features))))
        if self.layer_scale_parameter is not None:
            features = self.layer_scale_parameter * features
        features = features.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
        # Residual connection with optional stochastic depth on the transformed branch.
        return residual + self.drop_path(features)
195
+
196
+
197
class ConvNextStage(nn.Module):
    """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates (`List[float]`): Stochastic depth rates for each layer.
    """

    def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
        super().__init__()
        # Downsample whenever the channel count or the spatial resolution changes.
        if in_channels == out_channels and stride <= 1:
            self.downsampling_layer = nn.Identity()
        else:
            self.downsampling_layer = nn.Sequential(
                ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
                nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
            )
        if not drop_path_rates:
            drop_path_rates = [0.0] * depth
        blocks = []
        for block_idx in range(depth):
            blocks.append(ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[block_idx]))
        self.layers = nn.Sequential(*blocks)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        return self.layers(self.downsampling_layer(hidden_states))
227
+
228
+
229
class ConvNextEncoder(nn.Module):
    """Stack of [`ConvNextStage`] modules with a linearly increasing stochastic depth schedule."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList()
        # Linearly ramp the drop-path rate over all blocks, then split per stage.
        stage_drop_rates = [
            rates.tolist()
            for rates in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
        ]
        in_channels = config.hidden_sizes[0]
        for stage_idx in range(config.num_stages):
            out_channels = config.hidden_sizes[stage_idx]
            self.stages.append(
                ConvNextStage(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=1 if stage_idx == 0 else 2,  # the stem already downsampled before stage 0
                    depth=config.depths[stage_idx],
                    drop_path_rates=stage_drop_rates[stage_idx],
                )
            )
            in_channels = out_channels

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None

        for stage in self.stages:
            # Record the input of each stage, then the final output below.
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            hidden_states = stage(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in (hidden_states, all_hidden_states) if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
        )
274
+
275
+
276
class ConvNextPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvNextConfig
    base_model_prefix = "convnext"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
297
+
298
+
299
+ CONVNEXT_START_DOCSTRING = r"""
300
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
301
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
302
+ behavior.
303
+
304
+ Parameters:
305
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
306
+ Initializing with a config file does not load the weights associated with the model, only the
307
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
308
+ """
309
+
310
+ CONVNEXT_INPUTS_DOCSTRING = r"""
311
+ Args:
312
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
313
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
314
+ [`ConvNextImageProcessor.__call__`] for details.
315
+
316
+ output_hidden_states (`bool`, *optional*):
317
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
318
+ more detail.
319
+ return_dict (`bool`, *optional*):
320
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
321
+ """
322
+
323
+
324
@add_start_docstrings(
    "The bare ConvNext model outputting raw features without any specific head on top.",
    CONVNEXT_START_DOCSTRING,
)
class ConvNextModel(ConvNextPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = ConvNextEmbeddings(config)
        self.encoder = ConvNextEncoder(config)

        # Final layernorm, applied to the globally pooled features.
        self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        # Fall back to the config-level defaults when the caller did not specify.
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]

        # global average pooling, (N, C, H, W) -> (N, C)
        pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
385
+
386
+
387
@add_start_docstrings(
    """
    ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    CONVNEXT_START_DOCSTRING,
)
class ConvNextForImageClassification(ConvNextPreTrainedModel):
    # Backbone + single linear classifier over the pooled (layernormed) features.
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.convnext = ConvNextModel(config)

        # Classifier head; identity when num_labels == 0 (feature extraction only).
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        # Tuple layout from ConvNextModel: (last_hidden_state, pooled_output, *hidden_states).
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype/num_labels and cache it on
            # the config, so subsequent calls skip this branch.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    # Squeeze so shapes match for the single-output regression case.
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # Drop last_hidden_state/pooled_output, keep hidden states (outputs[2:]).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
468
+
469
+
470
@add_start_docstrings(
    """
    ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    CONVNEXT_START_DOCSTRING,
)
class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):
    """ConvNext encoder exposed through the backbone API: per-stage feature maps."""

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.embeddings = ConvNextEmbeddings(config)
        self.encoder = ConvNextEncoder(config)
        # Stem channels first, then one entry per stage.
        self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes

        # One channels-first layernorm per requested output stage.
        self.hidden_states_norms = nn.ModuleDict(
            {
                stage: ConvNextLayerNorm(channels, data_format="channels_first")
                for stage, channels in zip(self._out_features, self.channels)
            }
        )

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states

        embedding_output = self.embeddings(pixel_values)

        # All per-stage hidden states are always needed to assemble the feature maps,
        # regardless of what the caller asked to have returned.
        outputs = self.encoder(
            embedding_output,
            output_hidden_states=True,
            return_dict=return_dict,
        )

        hidden_states = outputs.hidden_states if return_dict else outputs[1]

        # Normalize and keep only the stages requested via out_features.
        feature_maps = tuple(
            self.hidden_states_norms[stage](hidden_state)
            for stage, hidden_state in zip(self.stage_names, hidden_states)
            if stage in self.out_features
        )

        if return_dict:
            return BackboneOutput(
                feature_maps=feature_maps,
                hidden_states=hidden_states if output_hidden_states else None,
                attentions=None,
            )

        output = (feature_maps,)
        if output_hidden_states:
            output += (hidden_states,)
        return output
evalkit_tf437/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py ADDED
@@ -0,0 +1,666 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 ConvNext model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
27
+ from ...modeling_tf_utils import (
28
+ TFModelInputType,
29
+ TFPreTrainedModel,
30
+ TFSequenceClassificationLoss,
31
+ get_initializer,
32
+ keras_serializable,
33
+ unpack_inputs,
34
+ )
35
+ from ...tf_utils import shape_list
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
37
+ from .configuration_convnext import ConvNextConfig
38
+
39
+
40
logger = logging.get_logger(__name__)


# Config class and checkpoint names interpolated into the auto-generated docstrings.
_CONFIG_FOR_DOC = "ConvNextConfig"
_CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
45
+
46
+
47
class TFConvNextDropPath(tf.keras.layers.Layer):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    References:
        (1) github.com:rwightman/pytorch-image-models
    """

    def __init__(self, drop_path: float, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x: tf.Tensor, training=None):
        # Identity at inference time (training is None or False).
        if not training:
            return x
        keep_prob = 1 - self.drop_path
        # Broadcastable shape: one Bernoulli draw per sample in the batch.
        shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
        # floor(keep_prob + U[0, 1)) is 1 with probability keep_prob, else 0.
        mask = tf.floor(keep_prob + tf.random.uniform(shape, 0, 1))
        # Scale by 1/keep_prob so the expected activation is unchanged.
        return (x / keep_prob) * mask
65
+
66
+
67
class TFConvNextEmbeddings(tf.keras.layers.Layer):
    """This class is comparable to (and inspired by) the SwinEmbeddings class
    found in src/transformers/models/swin/modeling_swin.py.

    Projects NCHW pixel values into non-overlapping patch embeddings (NHWC) and
    applies a layernorm.
    """

    def __init__(self, config: ConvNextConfig, **kwargs):
        super().__init__(**kwargs)
        # Non-overlapping patches: stride equals kernel size equals patch_size.
        self.patch_embeddings = tf.keras.layers.Conv2D(
            filters=config.hidden_sizes[0],
            kernel_size=config.patch_size,
            strides=config.patch_size,
            name="patch_embeddings",
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer=tf.keras.initializers.Zeros(),
        )
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
        self.num_channels = config.num_channels
        self.config = config

    def call(self, pixel_values):
        # Accept dict-style inputs (e.g. the output of an image processor).
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values["pixel_values"]

        # Channel dim of the NCHW input must match config.num_channels.
        tf.debugging.assert_equal(
            shape_list(pixel_values)[1],
            self.num_channels,
            message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
        )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))

        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings

    def build(self, input_shape=None):
        # Build sublayers eagerly so their variables exist before weight loading.
        if self.built:
            return
        self.built = True
        if getattr(self, "patch_embeddings", None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build([None, None, None, self.config.num_channels])
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
115
+
116
+
117
class TFConvNextLayer(tf.keras.layers.Layer):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,
    H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
    NHWC ordering, we can just apply the operations straight-away without the permutation.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        # 7x7 depthwise conv (groups == channels), "same" padding keeps H/W.
        self.dwconv = tf.keras.layers.Conv2D(
            filters=dim,
            kernel_size=7,
            padding="same",
            groups=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="dwconv",
        )  # depthwise conv
        self.layernorm = tf.keras.layers.LayerNormalization(
            epsilon=1e-6,
            name="layernorm",
        )
        # Inverted bottleneck: expand to 4*dim, activation, project back to dim.
        self.pwconv1 = tf.keras.layers.Dense(
            units=4 * dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv1",
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = get_tf_activation(config.hidden_act)
        self.pwconv2 = tf.keras.layers.Dense(
            units=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv2",
        )
        # Using `layers.Activation` instead of `tf.identity` to better control `training`
        # behaviour.
        self.drop_path = (
            TFConvNextDropPath(drop_path, name="drop_path")
            if drop_path > 0.0
            else tf.keras.layers.Activation("linear", name="drop_path")
        )

    def build(self, input_shape: tf.TensorShape = None):
        # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
        # NOTE(review): this weight is declared *before* the `built` guard below, so
        # a repeated build() call would reach add_weight again — confirm build() is
        # only ever invoked once per layer.
        self.layer_scale_parameter = (
            self.add_weight(
                shape=(self.dim,),
                initializer=tf.keras.initializers.Constant(value=self.config.layer_scale_init_value),
                trainable=True,
                name="layer_scale_parameter",
            )
            if self.config.layer_scale_init_value > 0
            else None
        )

        if self.built:
            return
        self.built = True
        if getattr(self, "dwconv", None) is not None:
            with tf.name_scope(self.dwconv.name):
                self.dwconv.build([None, None, None, self.dim])
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.dim])
        if getattr(self, "pwconv1", None) is not None:
            with tf.name_scope(self.pwconv1.name):
                self.pwconv1.build([None, None, self.dim])
        if getattr(self, "pwconv2", None) is not None:
            with tf.name_scope(self.pwconv2.name):
                self.pwconv2.build([None, None, 4 * self.dim])
        if getattr(self, "drop_path", None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(self, hidden_states, training=False):
        # Residual branch: dwconv -> LN -> expand -> act -> project (all NHWC).
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)

        # Optional per-channel layer scale (LayerScale).
        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x

        # Residual connection with stochastic depth on the branch.
        x = input + self.drop_path(x, training=training)
        return x
215
+
216
+
217
class TFConvNextStage(tf.keras.layers.Layer):
    """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config (`ConvNextConfig`):
            Model configuration class.
        in_channels (`int`):
            Number of input channels.
        out_channels (`int`):
            Number of output channels.
        depth (`int`):
            Number of residual blocks.
        drop_path_rates(`List[float]`):
            Stochastic depth rates for each layer.
    """

    def __init__(
        self,
        config: ConvNextConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 2,
        stride: int = 2,
        depth: int = 2,
        drop_path_rates: Optional[List[float]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Downsample (LN + strided conv) only when channels change or stride > 1;
        # otherwise the "downsampling" step is the identity.
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = [
                tf.keras.layers.LayerNormalization(
                    epsilon=1e-6,
                    name="downsampling_layer.0",
                ),
                # Inputs to this layer will follow NHWC format since we
                # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
                # layer. All the outputs throughout the model will be in NHWC
                # from this point on until the output where we again change to
                # NCHW.
                tf.keras.layers.Conv2D(
                    filters=out_channels,
                    kernel_size=kernel_size,
                    strides=stride,
                    kernel_initializer=get_initializer(config.initializer_range),
                    bias_initializer=tf.keras.initializers.Zeros(),
                    name="downsampling_layer.1",
                ),
            ]
        else:
            self.downsampling_layer = [tf.identity]

        # Default to no stochastic depth when no rates are supplied.
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = [
            TFConvNextLayer(
                config,
                dim=out_channels,
                drop_path=drop_path_rates[j],
                name=f"layers.{j}",
            )
            for j in range(depth)
        ]
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride

    def call(self, hidden_states):
        # Optional downsampling, then the residual blocks, all in NHWC.
        for layer in self.downsampling_layer:
            hidden_states = layer(hidden_states)
        for layer in self.layers:
            hidden_states = layer(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
        # Only the real (non-identity) downsampling sublayers need building.
        if self.in_channels != self.out_channels or self.stride > 1:
            with tf.name_scope(self.downsampling_layer[0].name):
                self.downsampling_layer[0].build([None, None, None, self.in_channels])
            with tf.name_scope(self.downsampling_layer[1].name):
                self.downsampling_layer[1].build([None, None, None, self.in_channels])
302
+
303
+
304
class TFConvNextEncoder(tf.keras.layers.Layer):
    """Stack of ConvNext stages with linearly increasing stochastic-depth rates."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # One drop-path rate per block across the whole network, split per stage.
        drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
        drop_path_rates = tf.split(drop_path_rates, config.depths)
        drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            # First stage keeps resolution (stride 1); later stages downsample by 2.
            stage = TFConvNextStage(
                config,
                in_channels=prev_chs,
                out_channels=out_chs,
                stride=2 if i > 0 else 1,
                depth=config.depths[i],
                drop_path_rates=drop_path_rates[i],
                name=f"stages.{i}",
            )
            self.stages.append(stage)
            prev_chs = out_chs

    def call(self, hidden_states, output_hidden_states=False, return_dict=True):
        """Run all stages; optionally collect the hidden state before each stage
        plus the final output (mirrors the PyTorch encoder)."""
        all_hidden_states = () if output_hidden_states else None

        # The stage index was previously bound via enumerate() but never used.
        for layer_module in self.stages:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            hidden_states = layer_module(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)

    def build(self, input_shape=None):
        # Build every stage under its own name scope for checkpoint compatibility.
        for stage in self.stages:
            with tf.name_scope(stage.name):
                stage.build(None)
347
+
348
+
349
@keras_serializable
class TFConvNextMainLayer(tf.keras.layers.Layer):
    """Core ConvNext computation shared by the base model and the classification head."""

    config_class = ConvNextConfig

    def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embeddings = TFConvNextEmbeddings(config, name="embeddings")
        self.encoder = TFConvNextEncoder(config, name="encoder")
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
        # We are setting the `data_format` like so because from here on we will revert to the
        # NCHW output format
        self.pooler = tf.keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None

    @unpack_inputs
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        # Resolve per-call overrides against the config defaults.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # NOTE(review): this calls the pooler unconditionally — presumably the layer
        # is always constructed with add_pooling_layer=True by in-repo callers.
        pooled_output = self.layernorm(self.pooler(last_hidden_state))

        # Change the other hidden state outputs to NCHW as well.
        # `hidden_states` is only bound when output_hidden_states is truthy; both
        # uses below are guarded by the same condition.
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            hidden_states = hidden_states if output_hidden_states else ()
            return (last_hidden_state, pooled_output) + hidden_states

        return TFBaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )

    def build(self, input_shape=None):
        # Build sublayers eagerly so their variables exist before weight loading.
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, self.config.hidden_sizes[-1]])
421
+
422
+
423
class TFConvNextPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Configuration class resolved by from_pretrained / auto classes.
    config_class = ConvNextConfig
    # Attribute name under which the main layer is stored on derived models.
    base_model_prefix = "convnext"
    # Name of the primary model input (images, not input_ids).
    main_input_name = "pixel_values"
432
+
433
+
434
# Docstring template prepended to every ConvNext model class via @add_start_docstrings.
CONVNEXT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Docstring template for call() inputs, injected via @add_start_docstrings_to_model_forward.
CONVNEXT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
"""
489
+
490
+
491
@add_start_docstrings(
    "The bare ConvNext model outputting raw features without any specific head on top.",
    CONVNEXT_START_DOCSTRING,
)
class TFConvNextModel(TFConvNextPreTrainedModel):
    # Thin keras-serializable wrapper around TFConvNextMainLayer.
    def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, TFConvNextModel
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        outputs = self.convnext(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            # Pass the main layer's tuple through unchanged.
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPooling(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convnext", None) is not None:
            with tf.name_scope(self.convnext.name):
                self.convnext.build(None)
561
+
562
+
563
@add_start_docstrings(
    """
    ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    CONVNEXT_START_DOCSTRING,
)
class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
    # Main layer + single Dense classifier over the pooled features.
    def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels
        self.convnext = TFConvNextMainLayer(config, name="convnext")

        # Classifier head
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="classifier",
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification
        >>> import tensorflow as tf
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
        >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        outputs = self.convnext(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        # Tuple layout from the main layer: (last_hidden_state, pooled_output, *hidden_states).
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)
        # Loss computation is delegated to the TFSequenceClassificationLoss mixin.
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            # Drop last_hidden_state/pooled_output, keep hidden states (outputs[2:]).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convnext", None) is not None:
            with tf.name_scope(self.convnext.name):
                self.convnext.build(None)
        if getattr(self, "classifier", None) is not None:
            if hasattr(self.classifier, "name"):
                with tf.name_scope(self.classifier.name):
                    self.classifier.build([None, None, self.config.hidden_sizes[-1]])
evalkit_tf437/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc ADDED
Binary file (4.67 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py ADDED
@@ -0,0 +1,537 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RetriBERT."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ....utils import logging
24
+
25
+
26
logger = logging.get_logger(__name__)

# Filename under which the vocabulary is stored when the tokenizer is saved.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

# Checkpoint name -> remote vocabulary file URL.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    }
}

# Maximum sequence length (in tokens) accepted by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}


# Default tokenizer init kwargs applied when loading each pretrained checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
46
+
47
+
48
# Copied from transformers.models.bert.tokenization_bert.load_vocab
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    Each line of the file is one token; a token's id is its 0-based line index.
    Returns an insertion-ordered mapping token -> id.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            # Only the trailing newline is stripped: tokens may carry other whitespace.
            vocab[line.rstrip("\n")] = index
    return vocab
58
+
59
+
60
# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text.

    Returns an empty list for empty / all-whitespace input.
    """
    stripped = text.strip()
    if not stripped:
        return []
    return stripped.split()
68
+
69
+
70
class RetriBertTokenizer(PreTrainedTokenizer):
    r"""
    Constructs a RetriBERT tokenizer.

    [`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting
    and wordpiece.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.__init__
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping (id -> token) for decoding; mirrors self.vocab's order.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        # NOTE(review): vocab and sub-tokenizers are set up *before* super().__init__,
        # presumably because the base class may tokenize special tokens during init — confirm.
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

    @property
    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
    def do_lower_case(self):
        # Delegates to the basic tokenizer (only exists when do_basic_tokenize=True).
        return self.basic_tokenizer.do_lower_case

    @property
    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
    def vocab_size(self):
        return len(self.vocab)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
    def get_vocab(self):
        # Base vocabulary merged with tokens added after loading.
        return dict(self.vocab, **self.added_tokens_encoder)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
    def _tokenize(self, text, split_special_tokens=False):
        """Tokenize text: optional basic (punctuation/case/CJK) pass, then WordPiece."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(
                text, never_split=self.all_special_tokens if not split_special_tokens else None
            ):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # Drop the " ##" continuation markers introduced by WordPiece.
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # Masks match the [CLS] x [SEP] (y [SEP]) layout built above.
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary to `save_directory` (one token per line, ordered by id)."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            # `save_directory` is treated as the target file path in this branch.
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                # Gaps in the id sequence would silently shift token ids on reload — warn.
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
318
+
319
+
320
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        # Stored as a set for O(1) membership tests during tokenization.
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of token not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                # strip_accents=None defaults to stripping when lowercasing (original BERT behavior);
                # strip_accents=True forces it even without lowercasing; False disables it.
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks ("Mn"),
        # which are then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Each punctuation character becomes its own token.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character, and control characters.
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
480
+
481
+
482
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        # vocab: mapping token -> id; only membership is used here.
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped directly to unk_token (avoids O(n^2) matching).
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedy: shrink the candidate substring from the right until it
                # is found in the vocabulary.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        # Non-initial pieces carry the "##" continuation prefix.
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No piece matches at this position: the whole word becomes unk_token.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
evalkit_tf437/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RetriBERT."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ....tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ....utils import logging
24
+ from .tokenization_retribert import RetriBertTokenizer
25
+
26
+
27
logger = logging.get_logger(__name__)

# Filenames for the slow vocabulary and the fast (tokenizers-library) serialization.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> remote file URLs for both vocabulary formats.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length (in tokens) accepted by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}


# Default tokenizer init kwargs applied when loading each pretrained checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
52
+
53
+
54
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library).

    [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation
    splitting and wordpiece.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        clean_text (`bool`, *optional*, defaults to `True`):
            Whether or not to clean the text before tokenization by removing any control characters and replacing all
            whitespaces by the classic one.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
            issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
            The prefix for subwords.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.__init__
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the arguments passed here,
        # rebuild it so the requested lowercase / accent / CJK settings take effect.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files (vocab) to `save_directory`; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__init__.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_fastspeech2_conformer": [
25
+ "FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP",
26
+ "FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
27
+ "FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP",
28
+ "FastSpeech2ConformerConfig",
29
+ "FastSpeech2ConformerHifiGanConfig",
30
+ "FastSpeech2ConformerWithHifiGanConfig",
31
+ ],
32
+ "tokenization_fastspeech2_conformer": ["FastSpeech2ConformerTokenizer"],
33
+ }
34
+
35
+ try:
36
+ if not is_torch_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["modeling_fastspeech2_conformer"] = [
42
+ "FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
43
+ "FastSpeech2ConformerWithHifiGan",
44
+ "FastSpeech2ConformerHifiGan",
45
+ "FastSpeech2ConformerModel",
46
+ "FastSpeech2ConformerPreTrainedModel",
47
+ ]
48
+
49
+ if TYPE_CHECKING:
50
+ from .configuration_fastspeech2_conformer import (
51
+ FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP,
52
+ FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
53
+ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP,
54
+ FastSpeech2ConformerConfig,
55
+ FastSpeech2ConformerHifiGanConfig,
56
+ FastSpeech2ConformerWithHifiGanConfig,
57
+ )
58
+ from .tokenization_fastspeech2_conformer import FastSpeech2ConformerTokenizer
59
+
60
+ try:
61
+ if not is_torch_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .modeling_fastspeech2_conformer import (
67
+ FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
68
+ FastSpeech2ConformerHifiGan,
69
+ FastSpeech2ConformerModel,
70
+ FastSpeech2ConformerPreTrainedModel,
71
+ FastSpeech2ConformerWithHifiGan,
72
+ )
73
+
74
+ else:
75
+ import sys
76
+
77
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.43 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/configuration_fastspeech2_conformer.cpython-310.pyc ADDED
Binary file (20.8 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.48 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_hifigan.cpython-310.pyc ADDED
Binary file (3.84 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_model_with_hifigan.cpython-310.pyc ADDED
Binary file (2.31 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/modeling_fastspeech2_conformer.cpython-310.pyc ADDED
Binary file (57.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/tokenization_fastspeech2_conformer.cpython-310.pyc ADDED
Binary file (6.72 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py ADDED
@@ -0,0 +1,488 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ FastSpeech2Conformer model configuration"""
16
+
17
+ from typing import Dict
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27
+ "espnet/fastspeech2_conformer": "https://huggingface.co/espnet/fastspeech2_conformer/raw/main/config.json",
28
+ }
29
+
30
+ FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
31
+ "espnet/fastspeech2_conformer_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_hifigan/raw/main/config.json",
32
+ }
33
+
34
+ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
35
+ "espnet/fastspeech2_conformer_with_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_with_hifigan/raw/main/config.json",
36
+ }
37
+
38
+
39
+ class FastSpeech2ConformerConfig(PretrainedConfig):
40
+ r"""
41
+ This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. It is used to
42
+ instantiate a FastSpeech2Conformer model according to the specified arguments, defining the model architecture.
43
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
44
+ FastSpeech2Conformer [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer)
45
+ architecture.
46
+
47
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
48
+ documentation from [`PretrainedConfig`] for more information.
49
+
50
+ Args:
51
+ hidden_size (`int`, *optional*, defaults to 384):
52
+ The dimensionality of the hidden layers.
53
+ vocab_size (`int`, *optional*, defaults to 78):
54
+ The size of the vocabulary.
55
+ num_mel_bins (`int`, *optional*, defaults to 80):
56
+ The number of mel filters used in the filter bank.
57
+ encoder_num_attention_heads (`int`, *optional*, defaults to 2):
58
+ The number of attention heads in the encoder.
59
+ encoder_layers (`int`, *optional*, defaults to 4):
60
+ The number of layers in the encoder.
61
+ encoder_linear_units (`int`, *optional*, defaults to 1536):
62
+ The number of units in the linear layer of the encoder.
63
+ decoder_layers (`int`, *optional*, defaults to 4):
64
+ The number of layers in the decoder.
65
+ decoder_num_attention_heads (`int`, *optional*, defaults to 2):
66
+ The number of attention heads in the decoder.
67
+ decoder_linear_units (`int`, *optional*, defaults to 1536):
68
+ The number of units in the linear layer of the decoder.
69
+ speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
70
+ The number of layers in the post-net of the speech decoder.
71
+ speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
72
+ The number of units in the post-net layers of the speech decoder.
73
+ speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
74
+ The kernel size in the post-net of the speech decoder.
75
+ positionwise_conv_kernel_size (`int`, *optional*, defaults to 3):
76
+ The size of the convolution kernel used in the position-wise layer.
77
+ encoder_normalize_before (`bool`, *optional*, defaults to `False`):
78
+ Specifies whether to normalize before encoder layers.
79
+ decoder_normalize_before (`bool`, *optional*, defaults to `False`):
80
+ Specifies whether to normalize before decoder layers.
81
+ encoder_concat_after (`bool`, *optional*, defaults to `False`):
82
+ Specifies whether to concatenate after encoder layers.
83
+ decoder_concat_after (`bool`, *optional*, defaults to `False`):
84
+ Specifies whether to concatenate after decoder layers.
85
+ reduction_factor (`int`, *optional*, defaults to 1):
86
+ The factor by which the speech frame rate is reduced.
87
+ speaking_speed (`float`, *optional*, defaults to 1.0):
88
+ The speed of the speech produced.
89
+ use_macaron_style_in_conformer (`bool`, *optional*, defaults to `True`):
90
+ Specifies whether to use macaron style in the conformer.
91
+ use_cnn_in_conformer (`bool`, *optional*, defaults to `True`):
92
+ Specifies whether to use convolutional neural networks in the conformer.
93
+ encoder_kernel_size (`int`, *optional*, defaults to 7):
94
+ The kernel size used in the encoder.
95
+ decoder_kernel_size (`int`, *optional*, defaults to 31):
96
+ The kernel size used in the decoder.
97
+ duration_predictor_layers (`int`, *optional*, defaults to 2):
98
+ The number of layers in the duration predictor.
99
+ duration_predictor_channels (`int`, *optional*, defaults to 256):
100
+ The number of channels in the duration predictor.
101
+ duration_predictor_kernel_size (`int`, *optional*, defaults to 3):
102
+ The kernel size used in the duration predictor.
103
+ energy_predictor_layers (`int`, *optional*, defaults to 2):
104
+ The number of layers in the energy predictor.
105
+ energy_predictor_channels (`int`, *optional*, defaults to 256):
106
+ The number of channels in the energy predictor.
107
+ energy_predictor_kernel_size (`int`, *optional*, defaults to 3):
108
+ The kernel size used in the energy predictor.
109
+ energy_predictor_dropout (`float`, *optional*, defaults to 0.5):
110
+ The dropout rate in the energy predictor.
111
+ energy_embed_kernel_size (`int`, *optional*, defaults to 1):
112
+ The kernel size used in the energy embed layer.
113
+ energy_embed_dropout (`float`, *optional*, defaults to 0.0):
114
+ The dropout rate in the energy embed layer.
115
+ stop_gradient_from_energy_predictor (`bool`, *optional*, defaults to `False`):
116
+ Specifies whether to stop gradients from the energy predictor.
117
+ pitch_predictor_layers (`int`, *optional*, defaults to 5):
118
+ The number of layers in the pitch predictor.
119
+ pitch_predictor_channels (`int`, *optional*, defaults to 256):
120
+ The number of channels in the pitch predictor.
121
+ pitch_predictor_kernel_size (`int`, *optional*, defaults to 5):
122
+ The kernel size used in the pitch predictor.
123
+ pitch_predictor_dropout (`float`, *optional*, defaults to 0.5):
124
+ The dropout rate in the pitch predictor.
125
+ pitch_embed_kernel_size (`int`, *optional*, defaults to 1):
126
+ The kernel size used in the pitch embed layer.
127
+ pitch_embed_dropout (`float`, *optional*, defaults to 0.0):
128
+ The dropout rate in the pitch embed layer.
129
+ stop_gradient_from_pitch_predictor (`bool`, *optional*, defaults to `True`):
130
+ Specifies whether to stop gradients from the pitch predictor.
131
+ encoder_dropout_rate (`float`, *optional*, defaults to 0.2):
132
+ The dropout rate in the encoder.
133
+ encoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
134
+ The positional dropout rate in the encoder.
135
+ encoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
136
+ The attention dropout rate in the encoder.
137
+ decoder_dropout_rate (`float`, *optional*, defaults to 0.2):
138
+ The dropout rate in the decoder.
139
+ decoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
140
+ The positional dropout rate in the decoder.
141
+ decoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
142
+ The attention dropout rate in the decoder.
143
+ duration_predictor_dropout_rate (`float`, *optional*, defaults to 0.2):
144
+ The dropout rate in the duration predictor.
145
+ speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
146
+ The dropout rate in the speech decoder postnet.
147
+ max_source_positions (`int`, *optional*, defaults to 5000):
148
+ if `"relative"` position embeddings are used, defines the maximum source input positions.
149
+ use_masking (`bool`, *optional*, defaults to `True`):
150
+ Specifies whether to use masking in the model.
151
+ use_weighted_masking (`bool`, *optional*, defaults to `False`):
152
+ Specifies whether to use weighted masking in the model.
153
+ num_speakers (`int`, *optional*):
154
+ Number of speakers. If set to > 1, assume that the speaker ids will be provided as the input and use
155
+ speaker id embedding layer.
156
+ num_languages (`int`, *optional*):
157
+ Number of languages. If set to > 1, assume that the language ids will be provided as the input and use the
158
+ languge id embedding layer.
159
+ speaker_embed_dim (`int`, *optional*):
160
+ Speaker embedding dimension. If set to > 0, assume that speaker_embedding will be provided as the input.
161
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
162
+ Specifies whether the model is an encoder-decoder.
163
+
164
+ Example:
165
+
166
+ ```python
167
+ >>> from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerConfig
168
+
169
+ >>> # Initializing a FastSpeech2Conformer style configuration
170
+ >>> configuration = FastSpeech2ConformerConfig()
171
+
172
+ >>> # Initializing a model from the FastSpeech2Conformer style configuration
173
+ >>> model = FastSpeech2ConformerModel(configuration)
174
+
175
+ >>> # Accessing the model configuration
176
+ >>> configuration = model.config
177
+ ```"""
178
+
179
+ model_type = "fastspeech2_conformer"
180
+ attribute_map = {"num_hidden_layers": "encoder_layers", "num_attention_heads": "encoder_num_attention_heads"}
181
+
182
+ def __init__(
183
+ self,
184
+ hidden_size=384,
185
+ vocab_size=78,
186
+ num_mel_bins=80,
187
+ encoder_num_attention_heads=2,
188
+ encoder_layers=4,
189
+ encoder_linear_units=1536,
190
+ decoder_layers=4,
191
+ decoder_num_attention_heads=2,
192
+ decoder_linear_units=1536,
193
+ speech_decoder_postnet_layers=5,
194
+ speech_decoder_postnet_units=256,
195
+ speech_decoder_postnet_kernel=5,
196
+ positionwise_conv_kernel_size=3,
197
+ encoder_normalize_before=False,
198
+ decoder_normalize_before=False,
199
+ encoder_concat_after=False,
200
+ decoder_concat_after=False,
201
+ reduction_factor=1,
202
+ speaking_speed=1.0,
203
+ use_macaron_style_in_conformer=True,
204
+ use_cnn_in_conformer=True,
205
+ encoder_kernel_size=7,
206
+ decoder_kernel_size=31,
207
+ duration_predictor_layers=2,
208
+ duration_predictor_channels=256,
209
+ duration_predictor_kernel_size=3,
210
+ energy_predictor_layers=2,
211
+ energy_predictor_channels=256,
212
+ energy_predictor_kernel_size=3,
213
+ energy_predictor_dropout=0.5,
214
+ energy_embed_kernel_size=1,
215
+ energy_embed_dropout=0.0,
216
+ stop_gradient_from_energy_predictor=False,
217
+ pitch_predictor_layers=5,
218
+ pitch_predictor_channels=256,
219
+ pitch_predictor_kernel_size=5,
220
+ pitch_predictor_dropout=0.5,
221
+ pitch_embed_kernel_size=1,
222
+ pitch_embed_dropout=0.0,
223
+ stop_gradient_from_pitch_predictor=True,
224
+ encoder_dropout_rate=0.2,
225
+ encoder_positional_dropout_rate=0.2,
226
+ encoder_attention_dropout_rate=0.2,
227
+ decoder_dropout_rate=0.2,
228
+ decoder_positional_dropout_rate=0.2,
229
+ decoder_attention_dropout_rate=0.2,
230
+ duration_predictor_dropout_rate=0.2,
231
+ speech_decoder_postnet_dropout=0.5,
232
+ max_source_positions=5000,
233
+ use_masking=True,
234
+ use_weighted_masking=False,
235
+ num_speakers=None,
236
+ num_languages=None,
237
+ speaker_embed_dim=None,
238
+ is_encoder_decoder=True,
239
+ **kwargs,
240
+ ):
241
+ if positionwise_conv_kernel_size % 2 == 0:
242
+ raise ValueError(
243
+ f"positionwise_conv_kernel_size must be odd, but got {positionwise_conv_kernel_size} instead."
244
+ )
245
+ if encoder_kernel_size % 2 == 0:
246
+ raise ValueError(f"encoder_kernel_size must be odd, but got {encoder_kernel_size} instead.")
247
+ if decoder_kernel_size % 2 == 0:
248
+ raise ValueError(f"decoder_kernel_size must be odd, but got {decoder_kernel_size} instead.")
249
+ if duration_predictor_kernel_size % 2 == 0:
250
+ raise ValueError(
251
+ f"duration_predictor_kernel_size must be odd, but got {duration_predictor_kernel_size} instead."
252
+ )
253
+ if energy_predictor_kernel_size % 2 == 0:
254
+ raise ValueError(
255
+ f"energy_predictor_kernel_size must be odd, but got {energy_predictor_kernel_size} instead."
256
+ )
257
+ if energy_embed_kernel_size % 2 == 0:
258
+ raise ValueError(f"energy_embed_kernel_size must be odd, but got {energy_embed_kernel_size} instead.")
259
+ if pitch_predictor_kernel_size % 2 == 0:
260
+ raise ValueError(
261
+ f"pitch_predictor_kernel_size must be odd, but got {pitch_predictor_kernel_size} instead."
262
+ )
263
+ if pitch_embed_kernel_size % 2 == 0:
264
+ raise ValueError(f"pitch_embed_kernel_size must be odd, but got {pitch_embed_kernel_size} instead.")
265
+ if hidden_size % encoder_num_attention_heads != 0:
266
+ raise ValueError("The hidden_size must be evenly divisible by encoder_num_attention_heads.")
267
+ if hidden_size % decoder_num_attention_heads != 0:
268
+ raise ValueError("The hidden_size must be evenly divisible by decoder_num_attention_heads.")
269
+ if use_masking and use_weighted_masking:
270
+ raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.")
271
+
272
+ self.hidden_size = hidden_size
273
+ self.vocab_size = vocab_size
274
+ self.num_mel_bins = num_mel_bins
275
+ self.encoder_config = {
276
+ "num_attention_heads": encoder_num_attention_heads,
277
+ "layers": encoder_layers,
278
+ "kernel_size": encoder_kernel_size,
279
+ "attention_dropout_rate": encoder_attention_dropout_rate,
280
+ "dropout_rate": encoder_dropout_rate,
281
+ "positional_dropout_rate": encoder_positional_dropout_rate,
282
+ "linear_units": encoder_linear_units,
283
+ "normalize_before": encoder_normalize_before,
284
+ "concat_after": encoder_concat_after,
285
+ }
286
+ self.decoder_config = {
287
+ "num_attention_heads": decoder_num_attention_heads,
288
+ "layers": decoder_layers,
289
+ "kernel_size": decoder_kernel_size,
290
+ "attention_dropout_rate": decoder_attention_dropout_rate,
291
+ "dropout_rate": decoder_dropout_rate,
292
+ "positional_dropout_rate": decoder_positional_dropout_rate,
293
+ "linear_units": decoder_linear_units,
294
+ "normalize_before": decoder_normalize_before,
295
+ "concat_after": decoder_concat_after,
296
+ }
297
+ self.encoder_num_attention_heads = encoder_num_attention_heads
298
+ self.encoder_layers = encoder_layers
299
+ self.duration_predictor_channels = duration_predictor_channels
300
+ self.duration_predictor_kernel_size = duration_predictor_kernel_size
301
+ self.duration_predictor_layers = duration_predictor_layers
302
+ self.energy_embed_dropout = energy_embed_dropout
303
+ self.energy_embed_kernel_size = energy_embed_kernel_size
304
+ self.energy_predictor_channels = energy_predictor_channels
305
+ self.energy_predictor_dropout = energy_predictor_dropout
306
+ self.energy_predictor_kernel_size = energy_predictor_kernel_size
307
+ self.energy_predictor_layers = energy_predictor_layers
308
+ self.pitch_embed_dropout = pitch_embed_dropout
309
+ self.pitch_embed_kernel_size = pitch_embed_kernel_size
310
+ self.pitch_predictor_channels = pitch_predictor_channels
311
+ self.pitch_predictor_dropout = pitch_predictor_dropout
312
+ self.pitch_predictor_kernel_size = pitch_predictor_kernel_size
313
+ self.pitch_predictor_layers = pitch_predictor_layers
314
+ self.positionwise_conv_kernel_size = positionwise_conv_kernel_size
315
+ self.speech_decoder_postnet_units = speech_decoder_postnet_units
316
+ self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout
317
+ self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel
318
+ self.speech_decoder_postnet_layers = speech_decoder_postnet_layers
319
+ self.reduction_factor = reduction_factor
320
+ self.speaking_speed = speaking_speed
321
+ self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
322
+ self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
323
+ self.max_source_positions = max_source_positions
324
+ self.use_cnn_in_conformer = use_cnn_in_conformer
325
+ self.use_macaron_style_in_conformer = use_macaron_style_in_conformer
326
+ self.use_masking = use_masking
327
+ self.use_weighted_masking = use_weighted_masking
328
+ self.num_speakers = num_speakers
329
+ self.num_languages = num_languages
330
+ self.speaker_embed_dim = speaker_embed_dim
331
+ self.duration_predictor_dropout_rate = duration_predictor_dropout_rate
332
+ self.is_encoder_decoder = is_encoder_decoder
333
+
334
+ super().__init__(
335
+ is_encoder_decoder=is_encoder_decoder,
336
+ **kwargs,
337
+ )
338
+
339
+
340
+ class FastSpeech2ConformerHifiGanConfig(PretrainedConfig):
341
+ r"""
342
+ This is the configuration class to store the configuration of a [`FastSpeech2ConformerHifiGanModel`]. It is used to
343
+ instantiate a FastSpeech2Conformer HiFi-GAN vocoder model according to the specified arguments, defining the model
344
+ architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
345
+ FastSpeech2Conformer
346
+ [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architecture.
347
+
348
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
349
+ documentation from [`PretrainedConfig`] for more information.
350
+
351
+ Args:
352
+ model_in_dim (`int`, *optional*, defaults to 80):
353
+ The number of frequency bins in the input log-mel spectrogram.
354
+ upsample_initial_channel (`int`, *optional*, defaults to 512):
355
+ The number of input channels into the upsampling network.
356
+ upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 2, 2]`):
357
+ A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The
358
+ length of *upsample_rates* defines the number of convolutional layers and has to match the length of
359
+ *upsample_kernel_sizes*.
360
+ upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[16, 16, 4, 4]`):
361
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The
362
+ length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of
363
+ *upsample_rates*.
364
+ resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`):
365
+ A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field
366
+ fusion (MRF) module.
367
+ resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
368
+ A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the
369
+ multi-receptive field fusion (MRF) module.
370
+ initializer_range (`float`, *optional*, defaults to 0.01):
371
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
372
+ leaky_relu_slope (`float`, *optional*, defaults to 0.1):
373
+ The angle of the negative slope used by the leaky ReLU activation.
374
+ normalize_before (`bool`, *optional*, defaults to `True`):
375
+ Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance.
376
+
377
+ Example:
378
+
379
+ ```python
380
+ >>> from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig
381
+
382
+ >>> # Initializing a FastSpeech2ConformerHifiGan configuration
383
+ >>> configuration = FastSpeech2ConformerHifiGanConfig()
384
+
385
+ >>> # Initializing a model (with random weights) from the configuration
386
+ >>> model = FastSpeech2ConformerHifiGan(configuration)
387
+
388
+ >>> # Accessing the model configuration
389
+ >>> configuration = model.config
390
+ ```"""
391
+
392
+ model_type = "hifigan"
393
+
394
+ def __init__(
395
+ self,
396
+ model_in_dim=80,
397
+ upsample_initial_channel=512,
398
+ upsample_rates=[8, 8, 2, 2],
399
+ upsample_kernel_sizes=[16, 16, 4, 4],
400
+ resblock_kernel_sizes=[3, 7, 11],
401
+ resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
402
+ initializer_range=0.01,
403
+ leaky_relu_slope=0.1,
404
+ normalize_before=True,
405
+ **kwargs,
406
+ ):
407
+ self.model_in_dim = model_in_dim
408
+ self.upsample_initial_channel = upsample_initial_channel
409
+ self.upsample_rates = upsample_rates
410
+ self.upsample_kernel_sizes = upsample_kernel_sizes
411
+ self.resblock_kernel_sizes = resblock_kernel_sizes
412
+ self.resblock_dilation_sizes = resblock_dilation_sizes
413
+ self.initializer_range = initializer_range
414
+ self.leaky_relu_slope = leaky_relu_slope
415
+ self.normalize_before = normalize_before
416
+ super().__init__(**kwargs)
417
+
418
+
419
+ class FastSpeech2ConformerWithHifiGanConfig(PretrainedConfig):
420
+ """
421
+ This is the configuration class to store the configuration of a [`FastSpeech2ConformerWithHifiGan`]. It is used to
422
+ instantiate a `FastSpeech2ConformerWithHifiGanModel` model according to the specified sub-models configurations,
423
+ defining the model architecture.
424
+
425
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
426
+ FastSpeech2ConformerModel [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) and
427
+ FastSpeech2ConformerHifiGan
428
+ [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architectures.
429
+
430
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
431
+ documentation from [`PretrainedConfig`] for more information.
432
+
433
+ Args:
434
+ model_config (`typing.Dict`, *optional*):
435
+ Configuration of the text-to-speech model.
436
+ vocoder_config (`typing.Dict`, *optional*):
437
+ Configuration of the vocoder model.
438
+ model_config ([`FastSpeech2ConformerConfig`], *optional*):
439
+ Configuration of the text-to-speech model.
440
+ vocoder_config ([`FastSpeech2ConformerHiFiGanConfig`], *optional*):
441
+ Configuration of the vocoder model.
442
+
443
+ Example:
444
+
445
+ ```python
446
+ >>> from transformers import (
447
+ ... FastSpeech2ConformerConfig,
448
+ ... FastSpeech2ConformerHifiGanConfig,
449
+ ... FastSpeech2ConformerWithHifiGanConfig,
450
+ ... FastSpeech2ConformerWithHifiGan,
451
+ ... )
452
+
453
+ >>> # Initializing FastSpeech2ConformerWithHifiGan sub-modules configurations.
454
+ >>> model_config = FastSpeech2ConformerConfig()
455
+ >>> vocoder_config = FastSpeech2ConformerHifiGanConfig()
456
+
457
+ >>> # Initializing a FastSpeech2ConformerWithHifiGan module style configuration
458
+ >>> configuration = FastSpeech2ConformerWithHifiGanConfig(model_config.to_dict(), vocoder_config.to_dict())
459
+
460
+ >>> # Initializing a model (with random weights)
461
+ >>> model = FastSpeech2ConformerWithHifiGan(configuration)
462
+
463
+ >>> # Accessing the model configuration
464
+ >>> configuration = model.config
465
+ ```
466
+ """
467
+
468
+ model_type = "fastspeech2_conformer_with_hifigan"
469
+ is_composition = True
470
+
471
+ def __init__(
472
+ self,
473
+ model_config: Dict = None,
474
+ vocoder_config: Dict = None,
475
+ **kwargs,
476
+ ):
477
+ if model_config is None:
478
+ model_config = {}
479
+ logger.info("model_config is None. initializing the model with default values.")
480
+
481
+ if vocoder_config is None:
482
+ vocoder_config = {}
483
+ logger.info("vocoder_config is None. initializing the coarse model with default values.")
484
+
485
+ self.model_config = FastSpeech2ConformerConfig(**model_config)
486
+ self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config)
487
+
488
+ super().__init__(**kwargs)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert FastSpeech2Conformer checkpoint."""
16
+
17
+ import argparse
18
+ import json
19
+ import re
20
+ from pathlib import Path
21
+ from tempfile import TemporaryDirectory
22
+
23
+ import torch
24
+ import yaml
25
+
26
+ from transformers import (
27
+ FastSpeech2ConformerConfig,
28
+ FastSpeech2ConformerModel,
29
+ FastSpeech2ConformerTokenizer,
30
+ logging,
31
+ )
32
+
33
+
34
+ logging.set_verbosity_info()
35
+ logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
36
+
37
# Mapping from ESPnet text2mel config keys to their `FastSpeech2ConformerConfig` equivalents.
# Used by `remap_model_yaml_config`; any ESPnet key not listed here is ignored during conversion.
CONFIG_MAPPING = {
    "adim": "hidden_size",
    "aheads": "num_attention_heads",
    "conformer_dec_kernel_size": "decoder_kernel_size",
    "conformer_enc_kernel_size": "encoder_kernel_size",
    "decoder_normalize_before": "decoder_normalize_before",
    "dlayers": "decoder_layers",
    "dunits": "decoder_linear_units",
    "duration_predictor_chans": "duration_predictor_channels",
    "duration_predictor_kernel_size": "duration_predictor_kernel_size",
    "duration_predictor_layers": "duration_predictor_layers",
    "elayers": "encoder_layers",
    "encoder_normalize_before": "encoder_normalize_before",
    "energy_embed_dropout": "energy_embed_dropout",
    "energy_embed_kernel_size": "energy_embed_kernel_size",
    "energy_predictor_chans": "energy_predictor_channels",
    "energy_predictor_dropout": "energy_predictor_dropout",
    "energy_predictor_kernel_size": "energy_predictor_kernel_size",
    "energy_predictor_layers": "energy_predictor_layers",
    "eunits": "encoder_linear_units",
    "pitch_embed_dropout": "pitch_embed_dropout",
    "pitch_embed_kernel_size": "pitch_embed_kernel_size",
    "pitch_predictor_chans": "pitch_predictor_channels",
    "pitch_predictor_dropout": "pitch_predictor_dropout",
    "pitch_predictor_kernel_size": "pitch_predictor_kernel_size",
    "pitch_predictor_layers": "pitch_predictor_layers",
    "positionwise_conv_kernel_size": "positionwise_conv_kernel_size",
    "postnet_chans": "speech_decoder_postnet_units",
    "postnet_filts": "speech_decoder_postnet_kernel",
    "postnet_layers": "speech_decoder_postnet_layers",
    "reduction_factor": "reduction_factor",
    "stop_gradient_from_energy_predictor": "stop_gradient_from_energy_predictor",
    "stop_gradient_from_pitch_predictor": "stop_gradient_from_pitch_predictor",
    "transformer_dec_attn_dropout_rate": "decoder_attention_dropout_rate",
    "transformer_dec_dropout_rate": "decoder_dropout_rate",
    "transformer_dec_positional_dropout_rate": "decoder_positional_dropout_rate",
    "transformer_enc_attn_dropout_rate": "encoder_attention_dropout_rate",
    "transformer_enc_dropout_rate": "encoder_dropout_rate",
    "transformer_enc_positional_dropout_rate": "encoder_positional_dropout_rate",
    "use_cnn_in_conformer": "use_cnn_in_conformer",
    "use_macaron_style_in_conformer": "use_macaron_style_in_conformer",
    "use_masking": "use_masking",
    "use_weighted_masking": "use_weighted_masking",
    "idim": "input_dim",
    "odim": "num_mel_bins",
    "spk_embed_dim": "speaker_embed_dim",
    "langs": "num_languages",
    "spks": "num_speakers",
}
86
+
87
+
88
def remap_model_yaml_config(yaml_config_path):
    """Load an ESPnet `config.yaml` and translate its text2mel parameters to HF config kwargs.

    Returns a tuple of (remapped config dict, g2p tokenizer name, token list).
    """
    with Path(yaml_config_path).open("r", encoding="utf-8") as config_file:
        espnet_args = argparse.Namespace(**yaml.safe_load(config_file))

    text2mel_params = espnet_args.tts_conf["text2mel_params"]

    # Translate every recognized ESPnet key via CONFIG_MAPPING; unrecognized keys are dropped.
    remapped_config = {
        hf_key: text2mel_params[espnet_key]
        for espnet_key, hf_key in CONFIG_MAPPING.items()
        if espnet_key in text2mel_params
    }

    return remapped_config, espnet_args.g2p, espnet_args.token_list
102
+
103
+
104
def convert_espnet_state_dict_to_hf(state_dict):
    """Rename ESPnet text2mel parameter names to their HF `FastSpeech2ConformerModel` equivalents.

    Entries that do not belong to the text2mel generator (e.g. vocoder weights) are dropped.
    """
    text2mel_prefix = "tts.generator.text2mel."
    converted = {}

    for orig_name, tensor in state_dict.items():
        if text2mel_prefix not in orig_name:
            continue
        name = orig_name.replace(text2mel_prefix, "")

        if "postnet" in orig_name:
            # Postnet Sequential indices -> named conv / batch-norm submodules.
            name = name.replace("postnet.postnet", "speech_decoder_postnet.layers")
            name = name.replace(".0.weight", ".conv.weight")
            name = name.replace(".1.weight", ".batch_norm.weight")
            name = name.replace(".1.bias", ".batch_norm.bias")
            name = name.replace(".1.running_mean", ".batch_norm.running_mean")
            name = name.replace(".1.running_var", ".batch_norm.running_var")
            name = name.replace(".1.num_batches_tracked", ".batch_norm.num_batches_tracked")
        if "feat_out" in orig_name:
            if "weight" in orig_name:
                name = "speech_decoder_postnet.feat_out.weight"
            if "bias" in orig_name:
                name = "speech_decoder_postnet.feat_out.bias"
        if "encoder.embed.0.weight" in orig_name:
            name = name.replace("0.", "")
        if "w_1" in orig_name:
            name = name.replace("w_1", "conv1")
        if "w_2" in orig_name:
            name = name.replace("w_2", "conv2")
        if "predictor.conv" in orig_name:
            name = name.replace(".conv", ".conv_layers")
            # Sublayer index 2 holds the layer norm; every other numbered sublayer is a conv.
            is_layer_norm = ("2.weight" in name) or ("2.bias" in name)
            replacement = r"\1.layer_norm" if is_layer_norm else r"\1.conv"
            name = re.sub(r"(\d)\.(\d)", replacement, name)
        if "pitch_embed" in orig_name or "energy_embed" in orig_name:
            name = name.replace("0", "conv")
        if "encoders" in orig_name:
            name = name.replace("encoders", "conformer_layers")
            name = name.replace("norm_final", "final_layer_norm")
            name = name.replace("norm_mha", "self_attn_layer_norm")
            name = name.replace("norm_ff_macaron", "ff_macaron_layer_norm")
            name = name.replace("norm_ff", "ff_layer_norm")
            name = name.replace("norm_conv", "conv_layer_norm")
        if "lid_emb" in orig_name:
            name = name.replace("lid_emb", "language_id_embedding")
        if "sid_emb" in orig_name:
            name = name.replace("sid_emb", "speaker_id_embedding")

        converted[name] = tensor

    return converted
152
+
153
+
154
@torch.no_grad()
def convert_FastSpeech2ConformerModel_checkpoint(
    checkpoint_path,
    yaml_config_path,
    pytorch_dump_folder_path,
    repo_id=None,
):
    """
    Convert an ESPnet FastSpeech2Conformer checkpoint to a HF model + tokenizer, save both, and
    optionally push them to the Hub.

    Args:
        checkpoint_path: Path to the original ESPnet checkpoint (a `torch.save`d state dict).
        yaml_config_path: Path to the ESPnet `config.yaml` describing the model.
        pytorch_dump_folder_path: Output folder for the converted model and tokenizer.
        repo_id: Optional Hub repo id to push the converted model and tokenizer to.
    """
    model_params, tokenizer_name, vocab = remap_model_yaml_config(yaml_config_path)
    config = FastSpeech2ConformerConfig(**model_params)

    # Prepare the model
    model = FastSpeech2ConformerModel(config)

    # map_location="cpu" so checkpoints trained on GPU can be converted on CPU-only machines.
    espnet_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint)

    model.load_state_dict(hf_compatible_state_dict)

    model.save_pretrained(pytorch_dump_folder_path)

    # Prepare the tokenizer: write a token -> id vocab file built from the ESPnet token list.
    with TemporaryDirectory() as tempdir:
        vocab = {token: token_index for token_index, token in enumerate(vocab)}  # don't shadow builtin `id`
        vocab_file = Path(tempdir) / "vocab.json"
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(vocab, f)
        # ESPnet g2p names containing "no_space" indicate space-stripping tokenization.
        should_strip_spaces = "no_space" in tokenizer_name
        tokenizer = FastSpeech2ConformerTokenizer(str(vocab_file), should_strip_spaces=should_strip_spaces)

    tokenizer.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
        tokenizer.push_to_hub(repo_id)
188
+ tokenizer.push_to_hub(repo_id)
189
+
190
+
191
if __name__ == "__main__":
    # Command-line entry point for the text2mel conversion script.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint"
    )
    arg_parser.add_argument(
        "--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert"
    )
    arg_parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    arg_parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    cli_args = arg_parser.parse_args()
    convert_FastSpeech2ConformerModel_checkpoint(
        cli_args.checkpoint_path,
        cli_args.yaml_config_path,
        cli_args.pytorch_dump_folder_path,
        cli_args.push_to_hub,
    )
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_hifigan.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert FastSpeech2Conformer HiFi-GAN checkpoint."""
16
+
17
+ import argparse
18
+ from pathlib import Path
19
+
20
+ import torch
21
+ import yaml
22
+
23
+ from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig, logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+ logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
28
+
29
+
30
def load_weights(checkpoint, hf_model, config):
    """Copy the ESPnet HiFi-GAN vocoder weights from `checkpoint` into `hf_model` in place.

    `checkpoint` is the full ESPnet state dict; only keys under the vocoder prefix are used.
    """
    # Keep only the vocoder entries and strip the prefix from their names.
    vocoder_key_prefix = "tts.generator.vocoder."
    checkpoint = {k.replace(vocoder_key_prefix, ""): v for k, v in checkpoint.items() if vocoder_key_prefix in k}

    # ESPnet stores weight-normalized parameters (weight_g / weight_v). Apply weight norm so the
    # HF model exposes matching parameter names, copy everything over, then remove weight norm to
    # bake the values into plain `weight` tensors.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    # One transposed conv per upsampling stage; ESPnet wraps it at Sequential index 1.
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    # There is one resblock per (upsample stage, resblock kernel size) pair.
    # NOTE(review): the inner loop assumes every resblock has len(resblock_dilation_sizes) convs,
    # i.e. each dilation tuple has the same length as the outer dilation list — confirm for
    # non-default configs.
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
60
+
61
+
62
def remap_hifigan_yaml_config(yaml_config_path):
    """Load an ESPnet `config.yaml` and translate its vocoder section to HF HiFi-GAN config kwargs.

    Raises `TypeError` if the yaml does not describe a `hifigan_generator` vocoder.
    """
    with Path(yaml_config_path).open("r", encoding="utf-8") as config_file:
        espnet_args = argparse.Namespace(**yaml.safe_load(config_file))

    vocoder_type = espnet_args.tts_conf["vocoder_type"]
    if vocoder_type != "hifigan_generator":
        raise TypeError(f"Vocoder config must be for `hifigan_generator`, but got {vocoder_type}")

    vocoder_params = espnet_args.tts_conf["vocoder_params"]

    # espnet_config_key -> hf_config_key
    key_mappings = {
        "channels": "upsample_initial_channel",
        "in_channels": "model_in_dim",
        "resblock_dilations": "resblock_dilation_sizes",
        "resblock_kernel_sizes": "resblock_kernel_sizes",
        "upsample_kernel_sizes": "upsample_kernel_sizes",
        "upsample_scales": "upsample_rates",
    }
    remapped_dict = {hf_key: vocoder_params[espnet_key] for espnet_key, hf_key in key_mappings.items()}

    # A few values live outside vocoder_params or have no direct ESPnet counterpart.
    remapped_dict["sampling_rate"] = espnet_args.tts_conf["sampling_rate"]
    remapped_dict["normalize_before"] = False
    remapped_dict["leaky_relu_slope"] = vocoder_params["nonlinear_activation_params"]["negative_slope"]

    return remapped_dict
90
+
91
+
92
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    yaml_config_path=None,
    repo_id=None,
):
    """
    Convert an ESPnet HiFi-GAN vocoder checkpoint to `FastSpeech2ConformerHifiGan` and save it.

    Args:
        checkpoint_path: Path to the original ESPnet checkpoint.
        pytorch_dump_folder_path: Output folder for the converted vocoder.
        yaml_config_path: Optional ESPnet `config.yaml`; when omitted, default config values are used.
        repo_id: Optional Hub repo id to push the converted vocoder to.
    """
    if yaml_config_path is not None:
        config_kwargs = remap_hifigan_yaml_config(yaml_config_path)
        config = FastSpeech2ConformerHifiGanConfig(**config_kwargs)
    else:
        config = FastSpeech2ConformerHifiGanConfig()

    model = FastSpeech2ConformerHifiGan(config)

    # map_location="cpu" so checkpoints trained on GPU can be converted on CPU-only machines.
    orig_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    load_weights(orig_checkpoint, model, config)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
115
+
116
+
117
if __name__ == "__main__":
    # Command-line entry point for the vocoder conversion script.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint"
    )
    arg_parser.add_argument(
        "--yaml_config_path", default=None, type=str, help="Path to config.yaml of model to convert"
    )
    arg_parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    arg_parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    cli_args = arg_parser.parse_args()
    # Note: the converter takes the dump folder *before* the optional yaml config path.
    convert_hifigan_checkpoint(
        cli_args.checkpoint_path,
        cli_args.pytorch_dump_folder_path,
        cli_args.yaml_config_path,
        cli_args.push_to_hub,
    )
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert FastSpeech2Conformer checkpoint."""
16
+
17
+ import argparse
18
+
19
+ import torch
20
+
21
+ from transformers import (
22
+ FastSpeech2ConformerConfig,
23
+ FastSpeech2ConformerHifiGan,
24
+ FastSpeech2ConformerHifiGanConfig,
25
+ FastSpeech2ConformerModel,
26
+ FastSpeech2ConformerWithHifiGan,
27
+ FastSpeech2ConformerWithHifiGanConfig,
28
+ logging,
29
+ )
30
+
31
+ from .convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch import (
32
+ convert_espnet_state_dict_to_hf,
33
+ remap_model_yaml_config,
34
+ )
35
+ from .convert_hifigan import load_weights, remap_hifigan_yaml_config
36
+
37
+
38
+ logging.set_verbosity_info()
39
+ logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
40
+
41
+
42
@torch.no_grad()  # consistent with the sibling converters; conversion never needs gradients
def convert_FastSpeech2ConformerWithHifiGan_checkpoint(
    checkpoint_path,
    yaml_config_path,
    pytorch_dump_folder_path,
    repo_id=None,
):
    """
    Convert an ESPnet checkpoint holding both the FastSpeech2Conformer text2mel model and its
    HiFi-GAN vocoder into a single `FastSpeech2ConformerWithHifiGan` model.

    Args:
        checkpoint_path: Path to the original ESPnet checkpoint (contains model + vocoder weights).
        yaml_config_path: Path to the ESPnet `config.yaml` describing both sub-models.
        pytorch_dump_folder_path: Output folder for the converted composite model.
        repo_id: Optional Hub repo id to push the converted model to.
    """
    # Prepare the model
    model_params, *_ = remap_model_yaml_config(yaml_config_path)
    model_config = FastSpeech2ConformerConfig(**model_params)

    model = FastSpeech2ConformerModel(model_config)

    # map_location="cpu" so checkpoints trained on GPU can be converted on CPU-only machines.
    espnet_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint)
    model.load_state_dict(hf_compatible_state_dict)

    # Prepare the vocoder
    config_kwargs = remap_hifigan_yaml_config(yaml_config_path)
    vocoder_config = FastSpeech2ConformerHifiGanConfig(**config_kwargs)

    vocoder = FastSpeech2ConformerHifiGan(vocoder_config)
    load_weights(espnet_checkpoint, vocoder, vocoder_config)

    # Combine model + vocoder under one composite config.
    config = FastSpeech2ConformerWithHifiGanConfig.from_sub_model_configs(model_config, vocoder_config)
    with_hifigan_model = FastSpeech2ConformerWithHifiGan(config)
    with_hifigan_model.model = model
    with_hifigan_model.vocoder = vocoder

    with_hifigan_model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        with_hifigan_model.push_to_hub(repo_id)
76
+
77
+
78
if __name__ == "__main__":
    # Command-line entry point for the combined model + vocoder conversion script.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint"
    )
    arg_parser.add_argument(
        "--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert"
    )
    arg_parser.add_argument(
        "--pytorch_dump_folder_path",
        required=True,
        default=None,
        type=str,
        help="Path to the output `FastSpeech2ConformerModel` PyTorch model.",
    )
    arg_parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    cli_args = arg_parser.parse_args()

    convert_FastSpeech2ConformerWithHifiGan_checkpoint(
        cli_args.checkpoint_path,
        cli_args.yaml_config_path,
        cli_args.pytorch_dump_folder_path,
        cli_args.push_to_hub,
    )
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py ADDED
@@ -0,0 +1,1686 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The Espnet authors, IMS Toucan authors, and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch FastSpeech2Conformer model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ from torch import nn
23
+
24
+ from ...modeling_outputs import BaseModelOutput
25
+ from ...modeling_utils import PreTrainedModel
26
+ from ...utils import ModelOutput, add_start_docstrings, logging, replace_return_docstrings
27
+ from .configuration_fastspeech2_conformer import (
28
+ FastSpeech2ConformerConfig,
29
+ FastSpeech2ConformerHifiGanConfig,
30
+ FastSpeech2ConformerWithHifiGanConfig,
31
+ )
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
37
+ "espnet/fastspeech2_conformer",
38
+ # See all FastSpeech2Conformer models at https://huggingface.co/models?filter=fastspeech2_conformer
39
+ ]
40
+
41
+
42
@dataclass
class FastSpeech2ConformerModelOutput(ModelOutput):
    """
    Output type of [`FastSpeech2ConformerModel`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Spectrogram generation loss.
        spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`):
            The predicted spectrogram.
        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
            Outputs of the duration predictor.
        pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
            Outputs of the pitch predictor.
        energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
            Outputs of the energy predictor.

    """

    # All fields default to None, so they are annotated Optional to match their runtime values.
    loss: Optional[torch.FloatTensor] = None
    spectrogram: Optional[torch.FloatTensor] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    duration_outputs: Optional[torch.LongTensor] = None
    pitch_outputs: Optional[torch.FloatTensor] = None
    energy_outputs: Optional[torch.FloatTensor] = None
+
96
+
97
@dataclass
class FastSpeech2ConformerWithHifiGanOutput(FastSpeech2ConformerModelOutput):
    """
    Output type of [`FastSpeech2ConformerWithHifiGan`].

    Args:
        waveform (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
            Speech output as a result of passing the predicted mel spectrogram through the vocoder.
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Spectrogram generation loss.
        spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`):
            The predicted spectrogram.
        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
            Outputs of the duration predictor.
        pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
            Outputs of the pitch predictor.
        energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
            Outputs of the energy predictor.
    """

    # Defaults to None before the vocoder runs, so annotated Optional like the inherited fields.
    waveform: Optional[torch.FloatTensor] = None
142
+
143
+
144
_CONFIG_FOR_DOC = "FastSpeech2ConformerConfig"

# The three r-string templates below hold the shared class-level documentation for the
# model classes defined later in this file (presumably consumed by the library's
# docstring-decorator utilities — the call sites are not visible in this chunk).
FASTSPEECH2_CONFORMER_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`FastSpeech2ConformerConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


HIFIGAN_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`FastSpeech2ConformerConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`FastSpeech2ConformerWithHifiGanConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
194
+
195
+
196
def length_regulator(encoded_embeddings, duration_labels, speaking_speed=1.0):
    """
    Length regulator for feed-forward Transformer.

    This is the length regulator module described in `FastSpeech: Fast, Robust and Controllable Text to Speech`
    https://arxiv.org/pdf/1905.09263.pdf. It expands char/phoneme-level embeddings to frame level by repeating
    each embedding vector according to its predicted duration.

    Args:
        encoded_embeddings (`torch.Tensor` of shape `(batch_size, max_text_length, embedding_dim)`):
            Batch of sequences of char or phoneme embeddings.
        duration_labels (`torch.LongTensor` of shape `(batch_size, time)`):
            Batch of durations of each frame.
        speaking_speed (`float`, *optional*, defaults to 1.0):
            Value to control speed of speech.

    Returns:
        `torch.Tensor`:
            Replicated input tensor based on durations (batch_size, time*, embedding_dim).

    Raises:
        ValueError: If `speaking_speed` is not strictly positive.
    """
    if speaking_speed <= 0:
        raise ValueError("`speaking_speed` must be greater than 0.")
    if speaking_speed != 1.0:
        # Rescale durations to speed up (< 1 step counts) or slow down speech.
        duration_labels = torch.round(duration_labels.float() * speaking_speed).long()

    if duration_labels.sum() == 0:
        # Guard against an all-zero duration batch: force one frame per token for the zero rows.
        duration_labels[duration_labels.sum(dim=1).eq(0)] = 1

    batch_size, _, embedding_dim = encoded_embeddings.size()
    # Longest expanded sequence in the batch determines the padded output length.
    target_length = torch.sum(duration_labels, dim=1).max()

    output = torch.zeros(
        (batch_size, target_length, embedding_dim),
        dtype=torch.float,
        device=encoded_embeddings.device,
    )

    # Expand each sequence independently and left-align it in the padded output.
    for batch_idx in range(batch_size):
        expanded = torch.repeat_interleave(encoded_embeddings[batch_idx], duration_labels[batch_idx], dim=0)
        output[batch_idx, : expanded.size(0)] = expanded

    return output
241
+
242
+
243
class FastSpeech2ConformerDurationPredictor(nn.Module):
    """
    Duration predictor module.

    This is a module of duration predictor described in the paper 'FastSpeech: Fast, Robust and Controllable Text to
    Speech' https://arxiv.org/pdf/1905.09263.pdf The duration predictor predicts a duration of each frame in log domain
    from the hidden embeddings of encoder.

    Note:
        The calculation domain of outputs differs between training and inference: in training the
        outputs stay in log domain, while at inference time they are converted to the linear domain.
    """

    def __init__(self, config: FastSpeech2ConformerConfig):
        super().__init__()

        self.conv_layers = nn.ModuleList()
        self.log_domain_offset = 1.0

        num_chans = config.duration_predictor_channels
        for layer_idx in range(config.duration_predictor_layers):
            # First layer consumes encoder hidden states, subsequent layers the predictor channels.
            in_channels = config.hidden_size if layer_idx == 0 else num_chans
            self.conv_layers.append(
                FastSpeech2ConformerPredictorLayer(
                    in_channels,
                    num_chans,
                    config.duration_predictor_kernel_size,
                    config.duration_predictor_dropout_rate,
                )
            )
        self.linear = nn.Linear(config.duration_predictor_channels, 1)

    def forward(self, encoder_hidden_states):
        """
        Args:
            encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
                Batch of input sequences.

        Returns:
            `torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`
            during training, or rounded linear-domain durations at inference time.
        """
        # Conv layers expect channels-first: (batch_size, input_dim, max_text_length).
        outputs = encoder_hidden_states.transpose(1, -1)
        for conv_layer in self.conv_layers:
            outputs = conv_layer(outputs)

        # Back to channels-last for the projection; result is in log domain.
        outputs = self.linear(outputs.transpose(1, -1)).squeeze(-1)

        if not self.training:
            # Inference: convert to linear domain and clamp to non-negative integer frame counts.
            outputs = torch.clamp(torch.round(outputs.exp() - self.log_domain_offset), min=0).long()

        return outputs
300
+
301
+
302
# Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5BatchNormConvLayer
class FastSpeech2ConformerBatchNormConvLayer(nn.Module):
    # NOTE(review): the "Copied from" marker above means repo tooling keeps this class in
    # sync with the SpeechT5 original; apply any code change there first.

    def __init__(self, config, layer_id=0):
        """
        One postnet block: Conv1d -> BatchNorm1d -> (Tanh, except on the last layer) -> Dropout.

        Args:
            config: model config; reads `num_mel_bins`, `speech_decoder_postnet_units`,
                `speech_decoder_postnet_layers`, `speech_decoder_postnet_kernel` and
                `speech_decoder_postnet_dropout`.
            layer_id (`int`, *optional*, defaults to 0):
                Position of this layer in the postnet stack.
        """
        super().__init__()

        # The first layer consumes mel features; later layers consume postnet channels.
        if layer_id == 0:
            in_conv_dim = config.num_mel_bins
        else:
            in_conv_dim = config.speech_decoder_postnet_units

        # The last layer maps back to mel features.
        if layer_id == config.speech_decoder_postnet_layers - 1:
            out_conv_dim = config.num_mel_bins
        else:
            out_conv_dim = config.speech_decoder_postnet_units

        self.conv = nn.Conv1d(
            in_conv_dim,
            out_conv_dim,
            kernel_size=config.speech_decoder_postnet_kernel,
            stride=1,
            # 'same'-style padding for odd kernels, keeping the time dimension unchanged.
            padding=(config.speech_decoder_postnet_kernel - 1) // 2,
            bias=False,
        )
        self.batch_norm = nn.BatchNorm1d(out_conv_dim)

        # No activation on the final layer.
        if layer_id < config.speech_decoder_postnet_layers - 1:
            self.activation = nn.Tanh()
        else:
            self.activation = None

        self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout)

    def forward(self, hidden_states):
        # hidden_states: (batch, channels, time) — channels-first as required by Conv1d/BatchNorm1d.
        hidden_states = self.conv(hidden_states)
        hidden_states = self.batch_norm(hidden_states)
        if self.activation is not None:
            hidden_states = self.activation(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
341
+
342
+
343
class FastSpeech2ConformerSpeechDecoderPostnet(nn.Module):
    """
    Projects decoder hidden states to mel-spectrogram frames and refines them with a
    stack of batch-normalized conv layers applied as a residual correction.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Linear projection to (num_mel_bins * reduction_factor) per decoder step.
        self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor)
        self.layers = nn.ModuleList(
            [
                FastSpeech2ConformerBatchNormConvLayer(config, layer_id)
                for layer_id in range(config.speech_decoder_postnet_layers)
            ]
        )

    def forward(self, hidden_states: torch.Tensor):
        """
        Returns:
            `(outputs_before_postnet, outputs_after_postnet)`, both of shape
            `(batch, frames, num_mel_bins)`; the second adds the postnet residual.
        """
        batch_size = hidden_states.size(0)
        outputs_before_postnet = self.feat_out(hidden_states).view(batch_size, -1, self.config.num_mel_bins)

        # Conv layers run channels-first, so transpose in and out.
        correction = outputs_before_postnet.transpose(1, 2)
        for postnet_layer in self.layers:
            correction = postnet_layer(correction)
        outputs_after_postnet = outputs_before_postnet + correction.transpose(1, 2)

        return outputs_before_postnet, outputs_after_postnet
359
+
360
+
361
class FastSpeech2ConformerPredictorLayer(nn.Module):
    """One predictor block: Conv1d -> ReLU -> LayerNorm (over channels) -> Dropout."""

    def __init__(self, input_channels, num_chans, kernel_size, dropout_rate):
        super().__init__()
        self.conv = nn.Conv1d(
            input_channels,
            num_chans,
            kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
        )
        self.activation = nn.ReLU()
        self.layer_norm = nn.LayerNorm(num_chans)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states):
        # Input is channels-first: (batch, channels, time).
        outputs = self.activation(self.conv(hidden_states))

        # LayerNorm normalizes the last dim, so move channels last and back again.
        outputs = self.layer_norm(outputs.transpose(1, -1)).transpose(1, -1)

        return self.dropout(outputs)
387
+
388
+
389
class FastSpeech2ConformerVariancePredictor(nn.Module):
    def __init__(
        self,
        config: FastSpeech2ConformerConfig,
        num_layers=2,
        num_chans=384,
        kernel_size=3,
        dropout_rate=0.5,
    ):
        """
        Initialize variance predictor module.

        Args:
            config (`FastSpeech2ConformerConfig`): Model configuration (provides `hidden_size`).
            num_layers (`int`, *optional*, defaults to 2): Number of convolutional layers.
            num_chans (`int`, *optional*, defaults to 384): Number of channels of convolutional layers.
            kernel_size (`int`, *optional*, defaults to 3): Kernel size of convolutional layers.
            dropout_rate (`float`, *optional*, defaults to 0.5): Dropout rate.
        """
        super().__init__()
        self.conv_layers = nn.ModuleList()
        for layer_idx in range(num_layers):
            # First layer consumes the model hidden size, subsequent layers the predictor channels.
            in_channels = config.hidden_size if layer_idx == 0 else num_chans
            self.conv_layers.append(
                FastSpeech2ConformerPredictorLayer(in_channels, num_chans, kernel_size, dropout_rate)
            )
        self.linear = nn.Linear(num_chans, 1)

    def forward(self, encoder_hidden_states, padding_masks=None):
        """
        Calculate forward propagation.

        Args:
            encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
                Batch of input sequences.
            padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
                Batch of masks indicating padded part.

        Returns:
            Tensor: Batch of predicted sequences `(batch_size, max_text_length, 1)`.
        """
        # Conv layers expect channels-first: (batch_size, input_dim, max_text_length).
        predictions = encoder_hidden_states.transpose(1, -1)
        for conv_layer in self.conv_layers:
            predictions = conv_layer(predictions)

        # Project each time step to a scalar prediction.
        predictions = self.linear(predictions.transpose(1, 2))

        if padding_masks is not None:
            # Zero out predictions on padded positions.
            predictions = predictions.masked_fill(padding_masks, 0.0)

        return predictions
440
+
441
+
442
class FastSpeech2ConformerVarianceEmbedding(nn.Module):
    """Projects a low-dimensional variance signal to the model hidden size with a 1D convolution."""

    def __init__(
        self,
        in_channels=1,
        out_channels=384,
        kernel_size=1,
        padding=0,
        dropout_rate=0.0,
    ):
        super().__init__()
        self.conv = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
        )
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states):
        # (batch, time, channels) -> (batch, channels, time) for the conv, then back.
        embedded = self.conv(hidden_states.transpose(1, 2))
        embedded = self.dropout(embedded)
        return embedded.transpose(1, 2)
466
+
467
+
468
class FastSpeech2ConformerAttention(nn.Module):
    """
    Multi-Head attention layer with relative position encoding. Details can be found in
    https://github.com/espnet/espnet/pull/2816. Paper: https://arxiv.org/abs/1901.02860.
    """

    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        """Construct an FastSpeech2ConformerAttention object."""
        super().__init__()
        # We assume d_v always equals dim_key
        self.num_heads = module_config["num_attention_heads"]
        self.hidden_size = config.hidden_size
        self.dim_key = self.hidden_size // self.num_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.linear_q = nn.Linear(self.hidden_size, self.hidden_size)
        self.linear_k = nn.Linear(self.hidden_size, self.hidden_size)
        self.linear_v = nn.Linear(self.hidden_size, self.hidden_size)
        self.linear_out = nn.Linear(self.hidden_size, self.hidden_size)
        self.dropout = nn.Dropout(p=module_config["attention_dropout_rate"])

        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        # these two learnable bias are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # NOTE(review): created via torch.Tensor(...) — uninitialized memory; presumably a
        # PreTrainedModel _init_weights hook elsewhere initializes them. Confirm before use.
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))

    def shift_relative_position_tensor(self, pos_tensor):
        """
        Transformer-XL "shift trick": realigns relative-position scores so that each query row
        is offset by one, then keeps only the positions covering the key sequence.

        Args:
            pos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor.
        """
        # Pad a zero column, then reshape so the padding induces a per-row shift.
        zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype)
        pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1)

        pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2))
        # only keep the positions from 0 to time2
        pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, : pos_tensor.size(-1) // 2 + 1]

        return pos_tensor

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        pos_emb: Optional[torch.Tensor] = None,
        output_attentions: Optional[torch.Tensor] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Compute 'Scaled Dot Product Attention' with rel. positional encoding.

        Args:
            hidden_states (`torch.Tensor` of shape `(batch, time2, size)`): Values of the hidden states
            attention_mask (`torch.Tensor` of shape `(batch, time1, time2)`): Mask tensor.
            pos_emb (`torch.Tensor` of shape `(batch, 2*time1-1, size)`): Positional embedding tensor.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        Returns:
            `torch.Tensor`: Output tensor of shape `(batch, time1, d_model)`.
        """
        bsz, q_len, _ = hidden_states.size()
        # Self-attention: queries, keys and values all come from the same hidden states.
        query_states = self.linear_q(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
        key_states = self.linear_k(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
        value_states = self.linear_v(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)

        bsz_pos = pos_emb.size(0)
        pos_encoding = self.linear_pos(pos_emb).view(bsz_pos, -1, self.num_heads, self.head_dim)

        # (batch_size, head, time1, dim_key)
        query_with_bias_u = (query_states + self.pos_bias_u).transpose(1, 2)
        # (batch_size, head, time1, dim_key)
        query_with_bias_v = (query_states + self.pos_bias_v).transpose(1, 2)

        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch_size, head, time1, time2)
        matrix_ac = torch.matmul(query_with_bias_u, key_states.permute(0, 2, 3, 1))

        # compute matrix b and matrix d
        # (batch_size, head, time1, 2*time1-1)
        matrix_bd = torch.matmul(query_with_bias_v, pos_encoding.permute(0, 2, 3, 1))
        matrix_bd = self.shift_relative_position_tensor(matrix_bd)

        # (batch_size, head, time1, time2)
        scores = (matrix_ac + matrix_bd) / math.sqrt(self.dim_key)

        # Forward attention
        if attention_mask is not None:
            expected_size = (bsz, 1, q_len)
            if attention_mask.size() != expected_size:
                raise ValueError(f"Attention mask should be of size {expected_size}, but is {attention_mask.size()}")
            # Invert to a boolean "is padded" mask and push masked scores to the dtype minimum,
            # then zero the corresponding weights after the softmax.
            attention_mask = attention_mask.unsqueeze(1).eq(0)
            min_value = float(torch.finfo(scores.dtype).min)
            scores = scores.masked_fill(attention_mask, min_value)
            attn_weights = torch.softmax(scores, dim=-1).masked_fill(attention_mask, 0.0)
        else:
            attn_weights = torch.softmax(scores, dim=-1)

        attn_weights = self.dropout(attn_weights)
        attn_output = torch.matmul(attn_weights, value_states.transpose(1, 2))
        # Merge heads back into the model dimension.
        attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, -1)

        attn_output = self.linear_out(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights
578
+
579
+
580
class FastSpeech2ConformerConvolutionModule(nn.Module):
    """
    Conformer convolution module: pointwise conv + GLU, depthwise conv, batch norm,
    swish activation, and a final pointwise conv.
    """

    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        super().__init__()
        # kernel_size should be an odd number for 'SAME' padding
        channels = config.hidden_size
        kernel_size = module_config["kernel_size"]
        self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=True)
        self.depthwise_conv = nn.Conv1d(
            channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias=True
        )
        self.norm = nn.BatchNorm1d(channels)
        self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=True)

    def forward(self, hidden_states):
        """
        Compute convolution module.

        Args:
            hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.

        Returns:
            `torch.Tensor`: Output tensor of shape `(batch, time, channels)`.
        """
        # Convolutions run channels-first: (batch, channels, time).
        outputs = hidden_states.transpose(1, 2)

        # Pointwise conv doubles the channels, then GLU halves them back: (batch, channels, time).
        outputs = nn.functional.glu(self.pointwise_conv1(outputs), dim=1)

        # Depthwise conv followed by batch normalization.
        outputs = self.norm(self.depthwise_conv(outputs))

        # Swish activation: x * sigmoid(x).
        outputs = outputs * torch.sigmoid(outputs)

        outputs = self.pointwise_conv2(outputs)

        # Restore channels-last layout.
        return outputs.transpose(1, 2)
621
+
622
+
623
class FastSpeech2ConformerEncoderLayer(nn.Module):
    """
    A single Conformer block: optional macaron-style feed-forward, relative-position
    self-attention, optional convolution module, and a final feed-forward module, each
    wrapped in a residual connection with layer normalization.
    """

    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        super().__init__()

        # self-attention module definition
        self.self_attn = FastSpeech2ConformerAttention(config, module_config)

        # feed-forward module definition
        self.feed_forward = FastSpeech2ConformerMultiLayeredConv1d(config, module_config)

        self.macaron_style = config.use_macaron_style_in_conformer
        if self.macaron_style:
            self.feed_forward_macaron = FastSpeech2ConformerMultiLayeredConv1d(config, module_config)
            self.ff_macaron_layer_norm = nn.LayerNorm(config.hidden_size)
            # Macaron style halves each of the two feed-forward contributions.
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0

        # convolution module definition
        self.use_cnn_module = config.use_cnn_in_conformer
        # Fix: always define `conv_module` so the `is not None` check in `forward` cannot
        # raise AttributeError when the convolution module is disabled.
        self.conv_module = None
        if self.use_cnn_module:
            self.conv_module = FastSpeech2ConformerConvolutionModule(config, module_config)
            self.conv_layer_norm = nn.LayerNorm(config.hidden_size)
            self.final_layer_norm = nn.LayerNorm(config.hidden_size)

        self.ff_layer_norm = nn.LayerNorm(config.hidden_size)

        self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size)

        self.dropout = nn.Dropout(module_config["dropout_rate"])
        self.size = config.hidden_size
        self.normalize_before = module_config["normalize_before"]
        self.concat_after = module_config["concat_after"]
        if self.concat_after:
            self.concat_linear = nn.Linear(config.hidden_size + config.hidden_size, config.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        pos_emb: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[torch.Tensor] = False,
    ):
        """
        Compute encoded features.

        Args:
            hidden_states (`torch.Tensor` of shape `(batch, time, size)`): Input tensor.
            pos_emb (`torch.Tensor` of shape `(1, time, size)`): Positional embeddings tensor.
            attention_mask (`torch.Tensor` of shape `(batch, time)`): Attention mask tensor for the input.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        Returns:
            `tuple`: `(hidden_states,)` of shape `(batch, time, size)`, plus the attention
            scores when `output_attentions=True`.
        """
        # Macaron-style feed-forward (first half), scaled by ff_scale.
        if self.macaron_style:
            residual = hidden_states
            if self.normalize_before:
                hidden_states = self.ff_macaron_layer_norm(hidden_states)
            hidden_states = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(hidden_states))
            if not self.normalize_before:
                hidden_states = self.ff_macaron_layer_norm(hidden_states)

        # multi-headed self-attention module
        residual = hidden_states
        if self.normalize_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        attention_output, attention_scores = self.self_attn(
            hidden_states, attention_mask=attention_mask, pos_emb=pos_emb, output_attentions=output_attentions
        )

        # Either concatenate+project the attention output or add it directly (with dropout).
        if self.concat_after:
            x_concat = torch.cat((hidden_states, attention_output), dim=-1)
            hidden_states = self.concat_linear(x_concat)
            hidden_states = residual + hidden_states
        else:
            hidden_states = self.dropout(attention_output)
            hidden_states = residual + hidden_states
        if not self.normalize_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # convolution module
        if self.use_cnn_module:
            residual = hidden_states
            if self.normalize_before:
                hidden_states = self.conv_layer_norm(hidden_states)
            hidden_states = self.conv_module(hidden_states)
            hidden_states = self.dropout(hidden_states)
            hidden_states = residual + hidden_states
            if not self.normalize_before:
                hidden_states = self.conv_layer_norm(hidden_states)

        # feed forward module
        residual = hidden_states
        if self.normalize_before:
            hidden_states = self.ff_layer_norm(hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = residual + self.ff_scale * hidden_states
        if not self.normalize_before:
            hidden_states = self.ff_layer_norm(hidden_states)

        # Final normalization only exists when the convolution module is enabled.
        if self.conv_module is not None:
            hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attention_scores,)

        return outputs
738
+
739
+
740
class FastSpeech2ConformerMultiLayeredConv1d(nn.Module):
    """
    Multi-layered conv1d for Transformer block.

    Position-wise feed-forward replacement built from two conv1d layers, introduced in
    'FastSpeech: Fast, Robust and Controllable Text to Speech'
    https://arxiv.org/pdf/1905.09263.pdf
    """

    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        """
        Initialize FastSpeech2ConformerMultiLayeredConv1d module.

        Args:
            config (`FastSpeech2ConformerConfig`): Provides `hidden_size` and
                `positionwise_conv_kernel_size`.
            module_config (`dict`): Provides `linear_units` and `dropout_rate`.
        """
        super().__init__()
        in_channels = config.hidden_size
        mid_channels = module_config["linear_units"]
        kernel_size = config.positionwise_conv_kernel_size
        same_padding = (kernel_size - 1) // 2
        self.conv1 = nn.Conv1d(in_channels, mid_channels, kernel_size, stride=1, padding=same_padding)
        self.conv2 = nn.Conv1d(mid_channels, in_channels, kernel_size, stride=1, padding=same_padding)
        self.dropout = nn.Dropout(module_config["dropout_rate"])

    def forward(self, hidden_states):
        """
        Calculate forward propagation.

        Args:
            hidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels).

        Returns:
            torch.Tensor: Batch of output tensors (batch_size, time, input_channels).
        """
        # Convolutions expect channels-first layout.
        outputs = hidden_states.transpose(-1, 1)
        outputs = self.dropout(torch.relu(self.conv1(outputs)))
        outputs = self.conv2(outputs)
        return outputs.transpose(-1, 1)
784
+
785
+
786
class FastSpeech2ConformerRelPositionalEncoding(nn.Module):
    """
    Args:
        Relative positional encoding module (new implementation). Details can be found in
        https://github.com/espnet/espnet/pull/2816. See : Appendix Batch in https://arxiv.org/abs/1901.02860
    config (`FastSpeech2ConformerConfig`):
        FastSpeech2ConformerConfig instance.
    module_config (`dict`):
        Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
    """

    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        """
        Construct an PositionalEncoding object.
        """
        super().__init__()
        self.embed_dim = config.hidden_size
        # Inputs are scaled by sqrt(dim) before adding positional information.
        self.input_scale = math.sqrt(self.embed_dim)
        self.dropout = nn.Dropout(p=module_config["positional_dropout_rate"])
        # NOTE: pos_enc is a plain attribute (not a registered buffer), so it is not
        # part of the state dict; it is (re)built lazily by extend_pos_enc.
        self.pos_enc = None
        self.max_len = 5000
        # Pre-build the table for sequences up to max_len using a dummy input.
        self.extend_pos_enc(torch.tensor(0.0).expand(1, self.max_len))

    def extend_pos_enc(self, x):
        """Reset the positional encodings."""
        if self.pos_enc is not None:
            # self.pos_enc contains both positive and negative parts
            # the length of self.pos_enc is 2 * input_len - 1
            if self.pos_enc.size(1) >= x.size(1) * 2 - 1:
                # Table is already large enough; just align dtype/device with the input.
                if self.pos_enc.dtype != x.dtype or self.pos_enc.device != x.device:
                    self.pos_enc = self.pos_enc.to(dtype=x.dtype, device=x.device)
                return
        # Suppose `i` means to the position of query vector and `j` means the
        # position of key vector. We use position relative positions when keys
        # are to the left (i>j) and negative relative positions otherwise (i<j).
        pos_enc_positive = torch.zeros(x.size(1), self.embed_dim)
        pos_enc_negative = torch.zeros(x.size(1), self.embed_dim)
        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        # Standard sinusoidal frequency schedule (base 10000).
        div_term = torch.exp(
            torch.arange(0, self.embed_dim, 2, dtype=torch.float32) * -(math.log(10000.0) / self.embed_dim)
        )
        pos_enc_positive[:, 0::2] = torch.sin(position * div_term)
        pos_enc_positive[:, 1::2] = torch.cos(position * div_term)
        pos_enc_negative[:, 0::2] = torch.sin(-1 * position * div_term)
        pos_enc_negative[:, 1::2] = torch.cos(-1 * position * div_term)

        # Reserve the order of positive indices and concat both positive and
        # negative indices. This is used to support the shifting trick
        # as in https://arxiv.org/abs/1901.02860
        pos_enc_positive = torch.flip(pos_enc_positive, [0]).unsqueeze(0)
        # Drop the duplicate relative position 0 from the negative half.
        pos_enc_negative = pos_enc_negative[1:].unsqueeze(0)
        pos_enc = torch.cat([pos_enc_positive, pos_enc_negative], dim=1)
        self.pos_enc = pos_enc.to(device=x.device, dtype=x.dtype)

    def forward(self, feature_representation):
        """
        Args:
            feature_representation (`torch.Tensor` of shape (batch_size, time, `*`)):
                Input tensor.

        Returns:
            `torch.Tensor`: Encoded tensor (batch_size, time, `*`).
        """
        self.extend_pos_enc(feature_representation)
        hidden_states = feature_representation * self.input_scale
        # Slice the 2*time-1 window of relative positions centered on position 0.
        center_idx = self.pos_enc.size(1) // 2
        pos_emb = self.pos_enc[:, center_idx - hidden_states.size(1) + 1 : center_idx + hidden_states.size(1)]
        return self.dropout(hidden_states), self.dropout(pos_emb)
854
+
855
+
856
class FastSpeech2ConformerEncoder(nn.Module):
    """
    FastSpeech2ConformerEncoder encoder module.

    Args:
        config (`FastSpeech2ConformerConfig`):
            FastSpeech2ConformerConfig instance.
        module_config (`dict`):
            Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
        use_encoder_input_layer (`bool`, *optional*, defaults to `False`):
            Whether to embed input token ids before the conformer stack.
    """

    def __init__(
        self,
        config: FastSpeech2ConformerConfig,
        module_config,
        use_encoder_input_layer=False,
    ):
        super().__init__()

        # Token embedding is only used when the input is token ids; otherwise the
        # input is already a continuous feature tensor.
        self.embed = (
            nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, padding_idx=0)
            if use_encoder_input_layer
            else None
        )

        self.pos_enc = FastSpeech2ConformerRelPositionalEncoding(config, module_config)

        self.conformer_layers = nn.ModuleList(
            [FastSpeech2ConformerEncoderLayer(config, module_config) for _ in range(module_config["layers"])]
        )

    def forward(
        self,
        input_tensor: torch.LongTensor,
        attention_mask: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        return_dict: Optional[bool] = None,
    ):
        """
        Args:
            input_tensor (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Token ids (when this encoder has an embedding layer) or continuous features.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices (1 = keep, 0 = mask).
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        Returns:
            `BaseModelOutput` or `tuple`: Last hidden state of shape `(batch, time, attention_dim)`,
            plus optional per-layer hidden states and attentions.
        """
        features = input_tensor if self.embed is None else self.embed(input_tensor)

        hidden_states, pos_emb = self.pos_enc(features)

        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for conformer_layer in self.conformer_layers:
            # Record the state *entering* each layer.
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = conformer_layer(hidden_states, pos_emb, attention_mask, output_attentions)
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions += (layer_outputs[1],)

        # Record the final state as well.
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if return_dict:
            return BaseModelOutput(
                last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
            )
        return tuple(v for v in (hidden_states, all_hidden_states, all_self_attentions) if v is not None)
954
+
955
+
956
class FastSpeech2ConformerLoss(nn.Module):
    """
    Combined FastSpeech2 training criterion: L1 spectrogram loss (before and after the postnet) plus MSE
    losses for the duration, pitch and energy predictors. Padding is excluded either by hard masking
    (`use_masking`) or by per-element loss weighting (`use_weighted_masking`); the two modes are exclusive.
    """

    def __init__(self, config: FastSpeech2ConformerConfig):
        super().__init__()

        use_masking = config.use_masking
        use_weighted_masking = config.use_weighted_masking

        # The two padding-handling strategies are mutually exclusive.
        if use_masking and use_weighted_masking:
            raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.")

        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking

        # define criterions
        # "none" keeps per-element losses so weighted masking can rescale them before summing.
        reduction = "none" if self.use_weighted_masking else "mean"
        self.l1_criterion = nn.L1Loss(reduction=reduction)
        self.mse_criterion = nn.MSELoss(reduction=reduction)
        self.duration_criterion = nn.MSELoss(reduction=reduction)
        # Offset added before log so zero-length durations stay finite: log(d + 1).
        self.log_domain_offset = 1.0

    def forward(
        self,
        outputs_after_postnet,
        outputs_before_postnet,
        duration_outputs,
        pitch_outputs,
        energy_outputs,
        spectrogram_labels,
        duration_labels,
        pitch_labels,
        energy_labels,
        duration_mask,
        spectrogram_mask,
    ):
        """
        Args:
            outputs_after_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of outputs after postnet.
            outputs_before_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of outputs before postnet.
            duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length)`):
                Batch of outputs of duration predictor.
            pitch_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of outputs of pitch predictor.
            energy_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of outputs of energy predictor.
            spectrogram_labels (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of target features.
            duration_labels (`torch.LongTensor` of shape `(batch_size, max_text_length)`): Batch of durations.
            pitch_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of target token-averaged pitch.
            energy_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of target token-averaged energy.
            duration_mask (`torch.LongTensor`):
                Mask used to discern which values the duration loss should be calculated for.
            spectrogram_mask (`torch.LongTensor`):
                Mask used to discern which values the spectrogam loss should be calculated for.

        Returns:
            `tuple(torch.FloatTensor)`: Tuple of tensors containing, in order, the L1 loss value, duration predictor
            loss value, pitch predictor loss value, and energy predictor loss value.

        """
        # Pitch/energy share the token-level (duration) mask, expanded to their trailing feature dim.
        pitch_and_energy_masks = duration_mask.unsqueeze(-1)

        # apply mask to remove padded part
        if self.use_masking:
            # masked_select flattens each tensor to its valid elements; predictions and labels are
            # selected with the same mask so they stay aligned element-wise.
            outputs_before_postnet = outputs_before_postnet.masked_select(spectrogram_mask)
            if outputs_after_postnet is not None:
                outputs_after_postnet = outputs_after_postnet.masked_select(spectrogram_mask)
            spectrogram_labels = spectrogram_labels.masked_select(spectrogram_mask)
            duration_outputs = duration_outputs.masked_select(duration_mask)
            duration_labels = duration_labels.masked_select(duration_mask)
            pitch_outputs = pitch_outputs.masked_select(pitch_and_energy_masks)
            energy_outputs = energy_outputs.masked_select(pitch_and_energy_masks)
            pitch_labels = pitch_labels.masked_select(pitch_and_energy_masks)
            energy_labels = energy_labels.masked_select(pitch_and_energy_masks)

        # calculate loss
        l1_loss = self.l1_criterion(outputs_before_postnet, spectrogram_labels)
        if outputs_after_postnet is not None:
            # Postnet output is supervised against the same target as the pre-postnet output.
            l1_loss = l1_loss + self.l1_criterion(outputs_after_postnet, spectrogram_labels)
        # Duration predictor is trained in the log domain (see log_domain_offset above).
        duration_labels = torch.log(duration_labels.float() + self.log_domain_offset)
        duration_loss = self.duration_criterion(duration_outputs, duration_labels)
        pitch_loss = self.mse_criterion(pitch_outputs, pitch_labels)
        energy_loss = self.mse_criterion(energy_outputs, energy_labels)

        # make weighted mask and apply it
        if self.use_weighted_masking:
            # Pad the mask along time so it matches the label length before computing weights.
            spectrogram_mask = nn.functional.pad(
                spectrogram_mask.transpose(1, 2),
                [0, spectrogram_labels.size(1) - spectrogram_mask.size(1), 0, 0, 0, 0],
                value=False,
            ).transpose(1, 2)

            # Normalize per sequence, then by batch x feature count, so each utterance contributes equally.
            out_weights = spectrogram_mask.float() / spectrogram_mask.sum(dim=1, keepdim=True).float()
            out_weights /= spectrogram_labels.size(0) * spectrogram_labels.size(2)
            duration_weights = duration_mask.float() / duration_mask.sum(dim=1, keepdim=True).float()
            duration_weights /= duration_labels.size(0)

            # apply weight
            l1_loss = l1_loss.mul(out_weights).masked_select(spectrogram_mask).sum()
            duration_loss = duration_loss.mul(duration_weights).masked_select(duration_mask).sum()
            pitch_weights = duration_weights.unsqueeze(-1)
            pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum()
            energy_loss = energy_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum()

        return l1_loss + duration_loss + pitch_loss + energy_loss
1064
+
1065
+
1066
class FastSpeech2ConformerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = FastSpeech2ConformerConfig
    base_model_prefix = "fastspeech2_conformer"

    main_input_name = "input_ids"

    def _init_weights(self, module):
        """Initialize module weights according to its type."""
        if isinstance(module, (nn.LayerNorm)):
            # LayerNorm starts as the identity transform.
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                # Uniform bias init bounded by the fan-in of the convolution.
                bound = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-bound, b=bound)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_()
            if module.padding_idx is not None:
                # Padding token embeds to the zero vector.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, FastSpeech2ConformerAttention):
            # Relative-position bias terms of the conformer attention.
            nn.init.xavier_uniform_(module.pos_bias_u)
            nn.init.xavier_uniform_(module.pos_bias_v)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, FastSpeech2ConformerEncoder):
            module.gradient_checkpointing = value
1098
+
1099
+
1100
@add_start_docstrings(
    """FastSpeech2Conformer Model.""",
    FASTSPEECH2_CONFORMER_START_DOCSTRING,
)
class FastSpeech2ConformerModel(FastSpeech2ConformerPreTrainedModel):
    """
    FastSpeech 2 module.

    This is a module of FastSpeech 2 described in 'FastSpeech 2: Fast and High-Quality End-to-End Text to Speech'
    https://arxiv.org/abs/2006.04558. Instead of quantized pitch and energy, we use token-averaged value introduced in
    FastPitch: Parallel Text-to-speech with Pitch Prediction. The encoder and decoder are Conformers instead of regular
    Transformers.
    """

    def __init__(self, config: FastSpeech2ConformerConfig):
        super().__init__(config)
        self.config = config

        # store hyperparameters
        self.vocab_size = config.vocab_size
        self.num_mel_bins = config.num_mel_bins
        self.hidden_size = config.hidden_size
        self.reduction_factor = config.reduction_factor
        self.stop_gradient_from_pitch_predictor = config.stop_gradient_from_pitch_predictor
        self.stop_gradient_from_energy_predictor = config.stop_gradient_from_energy_predictor

        # Optional conditioning: language id embedding (only for multilingual checkpoints).
        self.multilingual_model = config.num_languages is not None and config.num_languages > 1
        if self.multilingual_model:
            self.language_id_embedding = torch.nn.Embedding(config.num_languages, self.hidden_size)

        # Optional conditioning: speaker id embedding (only for multi-speaker checkpoints).
        self.multispeaker_model = config.num_speakers is not None and config.num_speakers > 1
        if self.multispeaker_model:
            self.speaker_id_embedding = torch.nn.Embedding(config.num_speakers, config.hidden_size)

        # Optional conditioning: external speaker embedding concatenated then projected back to hidden_size.
        self.speaker_embed_dim = config.speaker_embed_dim
        if self.speaker_embed_dim:
            self.projection = nn.Linear(config.hidden_size + self.speaker_embed_dim, config.hidden_size)

        self.encoder = FastSpeech2ConformerEncoder(config, config.encoder_config, use_encoder_input_layer=True)

        self.duration_predictor = FastSpeech2ConformerDurationPredictor(config)

        self.pitch_predictor = FastSpeech2ConformerVariancePredictor(
            config,
            num_layers=config.pitch_predictor_layers,
            num_chans=config.pitch_predictor_channels,
            kernel_size=config.pitch_predictor_kernel_size,
            dropout_rate=config.pitch_predictor_dropout,
        )
        # continuous pitch + FastPitch style avg
        self.pitch_embed = FastSpeech2ConformerVarianceEmbedding(
            out_channels=self.hidden_size,
            kernel_size=config.pitch_embed_kernel_size,
            padding=(config.pitch_embed_kernel_size - 1) // 2,
            dropout_rate=config.pitch_embed_dropout,
        )

        self.energy_predictor = FastSpeech2ConformerVariancePredictor(
            config,
            num_layers=config.energy_predictor_layers,
            num_chans=config.energy_predictor_channels,
            kernel_size=config.energy_predictor_kernel_size,
            dropout_rate=config.energy_predictor_dropout,
        )
        # continuous energy + FastPitch style avg
        self.energy_embed = FastSpeech2ConformerVarianceEmbedding(
            out_channels=self.hidden_size,
            kernel_size=config.energy_embed_kernel_size,
            padding=(config.energy_embed_kernel_size - 1) // 2,
            dropout_rate=config.energy_embed_dropout,
        )

        # The decoder is an encoder
        self.decoder = FastSpeech2ConformerEncoder(config, config.decoder_config, use_encoder_input_layer=False)

        self.speech_decoder_postnet = FastSpeech2ConformerSpeechDecoderPostnet(config)

        self.criterion = FastSpeech2ConformerLoss(config)

        self.post_init()

    @replace_return_docstrings(output_type=FastSpeech2ConformerModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.LongTensor] = None,
        spectrogram_labels: Optional[torch.FloatTensor] = None,
        duration_labels: Optional[torch.LongTensor] = None,
        pitch_labels: Optional[torch.FloatTensor] = None,
        energy_labels: Optional[torch.FloatTensor] = None,
        speaker_ids: Optional[torch.LongTensor] = None,
        lang_ids: Optional[torch.LongTensor] = None,
        speaker_embedding: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> Union[Tuple, FastSpeech2ConformerModelOutput]:
        """
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Input sequence of text vectors.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*, defaults to `None`):
                Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
                `[0, 1]`: 0 for tokens that are **masked**, 1 for tokens that are **not masked**.
            spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
                Batch of padded target features.
            duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
                Batch of padded durations.
            pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
                Batch of padded token-averaged pitch.
            energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
                Batch of padded token-averaged energy.
            speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
                Speaker ids used to condition features of speech output by the model.
            lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
                Language ids used to condition features of speech output by the model.
            speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
                Embedding containing conditioning signals for the features of the speech.
            return_dict (`bool`, *optional*, defaults to `None`):
                Whether or not to return a [`FastSpeech2ConformerModelOutput`] instead of a plain tuple.
            output_attentions (`bool`, *optional*, defaults to `None`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*, defaults to `None`):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.

        Returns:

        Example:

        ```python
        >>> from transformers import (
        ...     FastSpeech2ConformerTokenizer,
        ...     FastSpeech2ConformerModel,
        ...     FastSpeech2ConformerHifiGan,
        ... )

        >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
        >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
        >>> input_ids = inputs["input_ids"]

        >>> model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer")
        >>> output_dict = model(input_ids, return_dict=True)
        >>> spectrogram = output_dict["spectrogram"]

        >>> vocoder = FastSpeech2ConformerHifiGan.from_pretrained("espnet/fastspeech2_conformer_hifigan")
        >>> waveform = vocoder(spectrogram)
        >>> print(waveform.shape)
        torch.Size([1, 49664])
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # Default mask attends to every position.
        if attention_mask is None:
            attention_mask = torch.ones(input_ids.shape)

        has_missing_labels = (
            spectrogram_labels is None or duration_labels is None or pitch_labels is None or energy_labels is None
        )
        if self.training and has_missing_labels:
            raise ValueError("All labels must be provided to run in training mode.")

        # forward encoder
        text_masks = attention_mask.unsqueeze(-2)

        encoder_outputs = self.encoder(
            input_ids,
            text_masks,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        hidden_states = encoder_outputs[0]

        # Integrate with language id, speaker id, and speaker embedding
        if self.multispeaker_model and speaker_ids is not None:
            speaker_id_embeddings = self.speaker_id_embedding(speaker_ids.view(-1))
            hidden_states = hidden_states + speaker_id_embeddings.unsqueeze(1)

        if self.multilingual_model and lang_ids is not None:
            language_id_embbedings = self.language_id_embedding(lang_ids.view(-1))
            hidden_states = hidden_states + language_id_embbedings.unsqueeze(1)

        if self.speaker_embed_dim is not None and speaker_embedding is not None:
            # Broadcast the (normalized) speaker embedding over time, then project back to hidden_size.
            embeddings_expanded = (
                nn.functional.normalize(speaker_embedding).unsqueeze(1).expand(-1, hidden_states.size(1), -1)
            )
            hidden_states = self.projection(torch.cat([hidden_states, embeddings_expanded], dim=-1))

        # forward duration predictor and variance predictors
        duration_mask = ~attention_mask.bool()

        # Optionally stop gradients from the variance predictors flowing into the encoder.
        if self.stop_gradient_from_pitch_predictor:
            pitch_predictions = self.pitch_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1))
        else:
            pitch_predictions = self.pitch_predictor(hidden_states, duration_mask.unsqueeze(-1))

        if self.stop_gradient_from_energy_predictor:
            energy_predictions = self.energy_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1))
        else:
            energy_predictions = self.energy_predictor(hidden_states, duration_mask.unsqueeze(-1))

        duration_predictions = self.duration_predictor(hidden_states)
        # Zero out predicted durations on padding positions.
        duration_predictions = duration_predictions.masked_fill(duration_mask, 0.0)

        if not self.training:
            # use prediction in inference
            embedded_pitch_curve = self.pitch_embed(pitch_predictions)
            embedded_energy_curve = self.energy_embed(energy_predictions)
            hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve
            hidden_states = length_regulator(hidden_states, duration_predictions, self.config.speaking_speed)
        else:
            # use groundtruth in training
            embedded_pitch_curve = self.pitch_embed(pitch_labels)
            embedded_energy_curve = self.energy_embed(energy_labels)
            hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve
            hidden_states = length_regulator(hidden_states, duration_labels)

        # forward decoder
        if not self.training:
            # No decoder mask at inference: the length-regulated sequence carries no padding.
            hidden_mask = None
        else:
            # Frames fully filled with the ignore value (-100) are treated as padding.
            spectrogram_mask = (spectrogram_labels != -100).any(dim=-1)
            spectrogram_mask = spectrogram_mask.int()
            if self.reduction_factor > 1:
                # Truncate to a multiple of the reduction factor.
                length_dim = spectrogram_mask.shape[1] - spectrogram_mask.shape[1] % self.reduction_factor
                spectrogram_mask = spectrogram_mask[:, :, :length_dim]
            hidden_mask = spectrogram_mask.unsqueeze(-2)

        decoder_outputs = self.decoder(
            hidden_states,
            hidden_mask,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        outputs_before_postnet, outputs_after_postnet = self.speech_decoder_postnet(decoder_outputs[0])

        loss = None
        if self.training:
            # calculate loss
            loss_duration_mask = ~duration_mask
            loss_spectrogram_mask = spectrogram_mask.unsqueeze(-1).bool()
            loss = self.criterion(
                outputs_after_postnet=outputs_after_postnet,
                outputs_before_postnet=outputs_before_postnet,
                duration_outputs=duration_predictions,
                pitch_outputs=pitch_predictions,
                energy_outputs=energy_predictions,
                spectrogram_labels=spectrogram_labels,
                duration_labels=duration_labels,
                pitch_labels=pitch_labels,
                energy_labels=energy_labels,
                duration_mask=loss_duration_mask,
                spectrogram_mask=loss_spectrogram_mask,
            )

        if not return_dict:
            postnet_outputs = (outputs_after_postnet,)
            audio_feature_predictions = (
                duration_predictions,
                pitch_predictions,
                energy_predictions,
            )
            outputs = postnet_outputs + encoder_outputs + decoder_outputs[1:] + audio_feature_predictions
            return ((loss,) + outputs) if loss is not None else outputs

        return FastSpeech2ConformerModelOutput(
            loss=loss,
            spectrogram=outputs_after_postnet,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            duration_outputs=duration_predictions,
            pitch_outputs=pitch_predictions,
            energy_outputs=energy_predictions,
        )
1385
+
1386
+
1387
+ # Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock
1388
class HifiGanResidualBlock(nn.Module):
    """
    Dilated residual block of the HiFi-GAN generator: pairs of 1-D convolutions (a dilated one followed by
    an undilated one), each preceded by leaky ReLU, with a skip connection around every pair.
    """

    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
        super().__init__()
        self.leaky_relu_slope = leaky_relu_slope

        # First convolution of each pair uses an increasing dilation.
        self.convs1 = nn.ModuleList(
            [
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=rate,
                    padding=self.get_padding(kernel_size, rate),
                )
                for rate in dilation
            ]
        )
        # Second convolution of each pair is always undilated.
        self.convs2 = nn.ModuleList(
            [
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=1,
                    padding=self.get_padding(kernel_size, 1),
                )
                for _ in dilation
            ]
        )

    def get_padding(self, kernel_size, dilation=1):
        """Padding that keeps the output length equal to the input length ("same" padding)."""
        return (kernel_size * dilation - dilation) // 2

    def apply_weight_norm(self):
        for layer in (*self.convs1, *self.convs2):
            nn.utils.weight_norm(layer)

    def remove_weight_norm(self):
        for layer in (*self.convs1, *self.convs2):
            nn.utils.remove_weight_norm(layer)

    def forward(self, hidden_states):
        slope = self.leaky_relu_slope
        for first_conv, second_conv in zip(self.convs1, self.convs2):
            shortcut = hidden_states
            hidden_states = first_conv(nn.functional.leaky_relu(hidden_states, slope))
            hidden_states = second_conv(nn.functional.leaky_relu(hidden_states, slope))
            hidden_states = hidden_states + shortcut
        return hidden_states
1444
+
1445
+
1446
@add_start_docstrings(
    """HiFi-GAN vocoder.""",
    HIFIGAN_START_DOCSTRING,
)
# Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5HifiGan with SpeechT5->FastSpeech2Conformer
class FastSpeech2ConformerHifiGan(PreTrainedModel):
    # NOTE(review): this class mirrors SpeechT5HifiGan (see "Copied from" above); keep edits in sync there.
    config_class = FastSpeech2ConformerHifiGanConfig
    main_input_name = "spectrogram"

    def __init__(self, config: FastSpeech2ConformerHifiGanConfig):
        super().__init__(config)
        self.num_kernels = len(config.resblock_kernel_sizes)
        self.num_upsamples = len(config.upsample_rates)
        # Entry convolution: mel channels -> initial generator channels.
        self.conv_pre = nn.Conv1d(
            config.model_in_dim,
            config.upsample_initial_channel,
            kernel_size=7,
            stride=1,
            padding=3,
        )

        # Transposed convolutions that progressively upsample time while halving channels.
        self.upsampler = nn.ModuleList()
        for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
            self.upsampler.append(
                nn.ConvTranspose1d(
                    config.upsample_initial_channel // (2**i),
                    config.upsample_initial_channel // (2 ** (i + 1)),
                    kernel_size=kernel_size,
                    stride=upsample_rate,
                    padding=(kernel_size - upsample_rate) // 2,
                )
            )

        # num_kernels residual blocks per upsampling stage (multi-receptive-field fusion).
        self.resblocks = nn.ModuleList()
        for i in range(len(self.upsampler)):
            channels = config.upsample_initial_channel // (2 ** (i + 1))
            for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
                self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))

        # Final convolution down to a single waveform channel.
        self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)

        # Normalization statistics for the input spectrogram (loaded with the checkpoint).
        self.register_buffer("mean", torch.zeros(config.model_in_dim))
        self.register_buffer("scale", torch.ones(config.model_in_dim))

        # Initialize weights and apply final processing
        self.post_init()

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def apply_weight_norm(self):
        nn.utils.weight_norm(self.conv_pre)
        for layer in self.upsampler:
            nn.utils.weight_norm(layer)
        for layer in self.resblocks:
            layer.apply_weight_norm()
        nn.utils.weight_norm(self.conv_post)

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.conv_pre)
        for layer in self.upsampler:
            nn.utils.remove_weight_norm(layer)
        for layer in self.resblocks:
            layer.remove_weight_norm()
        nn.utils.remove_weight_norm(self.conv_post)

    def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
        r"""
        Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
        of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
        waveform.

        Args:
            spectrogram (`torch.FloatTensor`):
                Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
                config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.

        Returns:
            `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
            shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
        """
        if self.config.normalize_before:
            spectrogram = (spectrogram - self.mean) / self.scale

        is_batched = spectrogram.dim() == 3
        if not is_batched:
            spectrogram = spectrogram.unsqueeze(0)

        # Conv1d expects channels-first: (batch, model_in_dim, time).
        hidden_states = spectrogram.transpose(2, 1)

        hidden_states = self.conv_pre(hidden_states)
        for i in range(self.num_upsamples):
            hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
            hidden_states = self.upsampler[i](hidden_states)

            # Average the outputs of this stage's num_kernels residual blocks.
            res_state = self.resblocks[i * self.num_kernels](hidden_states)
            for j in range(1, self.num_kernels):
                res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
            hidden_states = res_state / self.num_kernels

        hidden_states = nn.functional.leaky_relu(hidden_states)
        hidden_states = self.conv_post(hidden_states)
        # tanh keeps samples in [-1, 1].
        hidden_states = torch.tanh(hidden_states)

        if not is_batched:
            # remove batch dim and collapse tensor to 1-d audio waveform
            waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)
        else:
            # remove seq-len dim since this collapses to 1
            waveform = hidden_states.squeeze(1)

        return waveform
1562
+
1563
+
1564
@add_start_docstrings(
    "The FastSpeech2ConformerModel with a FastSpeech2ConformerHifiGan vocoder head that performs text-to-speech (waveform).",
    FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING,
)
class FastSpeech2ConformerWithHifiGan(PreTrainedModel):
    config_class = FastSpeech2ConformerWithHifiGanConfig

    def __init__(self, config: FastSpeech2ConformerWithHifiGanConfig):
        super().__init__(config)

        # Acoustic model (text -> log-mel spectrogram) followed by the vocoder (spectrogram -> waveform).
        self.model = FastSpeech2ConformerModel(config.model_config)
        self.vocoder = FastSpeech2ConformerHifiGan(config.vocoder_config)

        self.config = config

    @replace_return_docstrings(
        output_type=FastSpeech2ConformerWithHifiGanOutput, config_class=FastSpeech2ConformerWithHifiGanConfig
    )
    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.LongTensor] = None,
        spectrogram_labels: Optional[torch.FloatTensor] = None,
        duration_labels: Optional[torch.LongTensor] = None,
        pitch_labels: Optional[torch.FloatTensor] = None,
        energy_labels: Optional[torch.FloatTensor] = None,
        speaker_ids: Optional[torch.LongTensor] = None,
        lang_ids: Optional[torch.LongTensor] = None,
        speaker_embedding: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> Union[Tuple, FastSpeech2ConformerModelOutput]:
        """
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Input sequence of text vectors.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*, defaults to `None`):
                Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
                `[0, 1]`: 0 for tokens that are **masked**, 1 for tokens that are **not masked**.
            spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
                Batch of padded target features.
            duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
                Batch of padded durations.
            pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
                Batch of padded token-averaged pitch.
            energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
                Batch of padded token-averaged energy.
            speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
                Speaker ids used to condition features of speech output by the model.
            lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
                Language ids used to condition features of speech output by the model.
            speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
                Embedding containing conditioning signals for the features of the speech.
            return_dict (`bool`, *optional*, defaults to `None`):
                Whether or not to return a [`FastSpeech2ConformerModelOutput`] instead of a plain tuple.
            output_attentions (`bool`, *optional*, defaults to `None`):
                Whether or not to return the attentions tensors of all attention layers.
            output_hidden_states (`bool`, *optional*, defaults to `None`):
                Whether or not to return the hidden states of all layers.

        Returns:

        Example:

        ```python
        >>> from transformers import (
        ...     FastSpeech2ConformerTokenizer,
        ...     FastSpeech2ConformerWithHifiGan,
        ... )

        >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
        >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
        >>> input_ids = inputs["input_ids"]

        >>> model = FastSpeech2ConformerWithHifiGan.from_pretrained("espnet/fastspeech2_conformer_with_hifigan")
        >>> output_dict = model(input_ids, return_dict=True)
        >>> waveform = output_dict["waveform"]
        >>> print(waveform.shape)
        torch.Size([1, 49664])
        ```
        """
        # Flags fall back to the acoustic model's sub-config, not the composite config.
        model_config = self.config.model_config
        if return_dict is None:
            return_dict = model_config.use_return_dict
        if output_attentions is None:
            output_attentions = model_config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = model_config.output_hidden_states

        model_outputs = self.model(
            input_ids,
            attention_mask,
            spectrogram_labels=spectrogram_labels,
            duration_labels=duration_labels,
            pitch_labels=pitch_labels,
            energy_labels=energy_labels,
            speaker_ids=speaker_ids,
            lang_ids=lang_ids,
            speaker_embedding=speaker_embedding,
            return_dict=return_dict,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        if return_dict:
            spectrogram = model_outputs["spectrogram"]
        else:
            # In tuple mode the loss (present only when all labels were given) shifts the
            # spectrogram from index 0 to index 1.
            all_labels_provided = not (
                spectrogram_labels is None or duration_labels is None or pitch_labels is None or energy_labels is None
            )
            spectrogram = model_outputs[1] if all_labels_provided else model_outputs[0]

        waveform = self.vocoder(spectrogram)

        if return_dict:
            return FastSpeech2ConformerWithHifiGanOutput(waveform=waveform, **model_outputs)
        return model_outputs + (waveform,)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for FastSpeech2Conformer."""
16
+ import json
17
+ import os
18
+ from typing import Optional, Tuple
19
+
20
+ import regex
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer
23
+ from ...utils import logging, requires_backends
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
29
+
30
+ PRETRAINED_VOCAB_FILES_MAP = {
31
+ "vocab_file": {
32
+ "espnet/fastspeech2_conformer": "https://huggingface.co/espnet/fastspeech2_conformer/raw/main/vocab.json",
33
+ },
34
+ }
35
+
36
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
37
+ # Set to somewhat arbitrary large number as the model input
38
+ # isn't constrained by the relative positional encoding
39
+ "espnet/fastspeech2_conformer": 4096,
40
+ }
41
+
42
+
43
+ class FastSpeech2ConformerTokenizer(PreTrainedTokenizer):
44
+ """
45
+ Construct a FastSpeech2Conformer tokenizer.
46
+
47
+ Args:
48
+ vocab_file (`str`):
49
+ Path to the vocabulary file.
50
+ bos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
51
+ The begin of sequence token. Note that for FastSpeech2, it is the same as the `eos_token`.
52
+ eos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
53
+ The end of sequence token. Note that for FastSpeech2, it is the same as the `bos_token`.
54
+ pad_token (`str`, *optional*, defaults to `"<blank>"`):
55
+ The token used for padding, for example when batching sequences of different lengths.
56
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
57
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
58
+ token instead.
59
+ should_strip_spaces (`bool`, *optional*, defaults to `False`):
60
+ Whether or not to strip the spaces from the list of tokens.
61
+ """
62
+
63
+ vocab_files_names = VOCAB_FILES_NAMES
64
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
65
+ model_input_names = ["input_ids", "attention_mask"]
66
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
67
+
68
+ def __init__(
69
+ self,
70
+ vocab_file,
71
+ bos_token="<sos/eos>",
72
+ eos_token="<sos/eos>",
73
+ pad_token="<blank>",
74
+ unk_token="<unk>",
75
+ should_strip_spaces=False,
76
+ **kwargs,
77
+ ):
78
+ requires_backends(self, "g2p_en")
79
+
80
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
81
+ self.encoder = json.load(vocab_handle)
82
+
83
+ import g2p_en
84
+
85
+ self.g2p = g2p_en.G2p()
86
+
87
+ self.decoder = {v: k for k, v in self.encoder.items()}
88
+
89
+ super().__init__(
90
+ bos_token=bos_token,
91
+ eos_token=eos_token,
92
+ unk_token=unk_token,
93
+ pad_token=pad_token,
94
+ should_strip_spaces=should_strip_spaces,
95
+ **kwargs,
96
+ )
97
+
98
+ self.should_strip_spaces = should_strip_spaces
99
+
100
+ @property
101
+ def vocab_size(self):
102
+ return len(self.decoder)
103
+
104
+ def get_vocab(self):
105
+ "Returns vocab as a dict"
106
+ return dict(self.encoder, **self.added_tokens_encoder)
107
+
108
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
109
+ # expand symbols
110
+ text = regex.sub(";", ",", text)
111
+ text = regex.sub(":", ",", text)
112
+ text = regex.sub("-", " ", text)
113
+ text = regex.sub("&", "and", text)
114
+
115
+ # strip unnecessary symbols
116
+ text = regex.sub(r"[\(\)\[\]\<\>\"]+", "", text)
117
+
118
+ # strip whitespaces
119
+ text = regex.sub(r"\s+", " ", text)
120
+
121
+ text = text.upper()
122
+
123
+ return text, kwargs
124
+
125
+ def _tokenize(self, text):
126
+ """Returns a tokenized string."""
127
+ # phonemize
128
+ tokens = self.g2p(text)
129
+
130
+ if self.should_strip_spaces:
131
+ tokens = list(filter(lambda s: s != " ", tokens))
132
+
133
+ tokens.append(self.eos_token)
134
+
135
+ return tokens
136
+
137
+ def _convert_token_to_id(self, token):
138
+ """Converts a token (str) in an id using the vocab."""
139
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
140
+
141
+ def _convert_id_to_token(self, index):
142
+ """Converts an index (integer) in a token (str) using the vocab."""
143
+ return self.decoder.get(index, self.unk_token)
144
+
145
+ # Override since phonemes cannot be converted back to strings
146
+ def decode(self, token_ids, **kwargs):
147
+ logger.warn(
148
+ "Phonemes cannot be reliably converted to a string due to the one-many mapping, converting to tokens instead."
149
+ )
150
+ return self.convert_ids_to_tokens(token_ids)
151
+
152
+ # Override since phonemes cannot be converted back to strings
153
+ def convert_tokens_to_string(self, tokens, **kwargs):
154
+ logger.warn(
155
+ "Phonemes cannot be reliably converted to a string due to the one-many mapping, returning the tokens."
156
+ )
157
+ return tokens
158
+
159
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
160
+ """
161
+ Save the vocabulary and special tokens file to a directory.
162
+
163
+ Args:
164
+ save_directory (`str`):
165
+ The directory in which to save the vocabulary.
166
+
167
+ Returns:
168
+ `Tuple(str)`: Paths to the files saved.
169
+ """
170
+ if not os.path.isdir(save_directory):
171
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
172
+ return
173
+ vocab_file = os.path.join(
174
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
175
+ )
176
+
177
+ with open(vocab_file, "w", encoding="utf-8") as f:
178
+ f.write(json.dumps(self.get_vocab(), ensure_ascii=False))
179
+
180
+ return (vocab_file,)
181
+
182
+ def __getstate__(self):
183
+ state = self.__dict__.copy()
184
+ state["g2p"] = None
185
+ return state
186
+
187
+ def __setstate__(self, d):
188
+ self.__dict__ = d
189
+
190
+ try:
191
+ import g2p_en
192
+
193
+ self.g2p = g2p_en.G2p()
194
+ except ImportError:
195
+ raise ImportError(
196
+ "You need to install g2p-en to use FastSpeech2ConformerTokenizer. "
197
+ "See https://pypi.org/project/g2p-en/ for installation."
198
+ )
evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tf_available,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
27
+ }
28
+
29
+ try:
30
+ if not is_vision_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
36
+ _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_mobilevit"] = [
45
+ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
46
+ "MobileViTForImageClassification",
47
+ "MobileViTForSemanticSegmentation",
48
+ "MobileViTModel",
49
+ "MobileViTPreTrainedModel",
50
+ ]
51
+
52
+ try:
53
+ if not is_tf_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ _import_structure["modeling_tf_mobilevit"] = [
59
+ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
60
+ "TFMobileViTForImageClassification",
61
+ "TFMobileViTForSemanticSegmentation",
62
+ "TFMobileViTModel",
63
+ "TFMobileViTPreTrainedModel",
64
+ ]
65
+
66
+ if TYPE_CHECKING:
67
+ from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
68
+
69
+ try:
70
+ if not is_vision_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ from .feature_extraction_mobilevit import MobileViTFeatureExtractor
76
+ from .image_processing_mobilevit import MobileViTImageProcessor
77
+
78
+ try:
79
+ if not is_torch_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ from .modeling_mobilevit import (
85
+ MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
86
+ MobileViTForImageClassification,
87
+ MobileViTForSemanticSegmentation,
88
+ MobileViTModel,
89
+ MobileViTPreTrainedModel,
90
+ )
91
+
92
+ try:
93
+ if not is_tf_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ from .modeling_tf_mobilevit import (
99
+ TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
100
+ TFMobileViTForImageClassification,
101
+ TFMobileViTForSemanticSegmentation,
102
+ TFMobileViTModel,
103
+ TFMobileViTPreTrainedModel,
104
+ )
105
+
106
+
107
+ else:
108
+ import sys
109
+
110
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc ADDED
Binary file (28.3 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc ADDED
Binary file (40.3 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MobileViT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
30
+ "apple/mobilevit-small": "https://huggingface.co/apple/mobilevit-small/resolve/main/config.json",
31
+ "apple/mobilevit-x-small": "https://huggingface.co/apple/mobilevit-x-small/resolve/main/config.json",
32
+ "apple/mobilevit-xx-small": "https://huggingface.co/apple/mobilevit-xx-small/resolve/main/config.json",
33
+ "apple/deeplabv3-mobilevit-small": (
34
+ "https://huggingface.co/apple/deeplabv3-mobilevit-small/resolve/main/config.json"
35
+ ),
36
+ "apple/deeplabv3-mobilevit-x-small": (
37
+ "https://huggingface.co/apple/deeplabv3-mobilevit-x-small/resolve/main/config.json"
38
+ ),
39
+ "apple/deeplabv3-mobilevit-xx-small": (
40
+ "https://huggingface.co/apple/deeplabv3-mobilevit-xx-small/resolve/main/config.json"
41
+ ),
42
+ # See all MobileViT models at https://huggingface.co/models?filter=mobilevit
43
+ }
44
+
45
+
46
+ class MobileViTConfig(PretrainedConfig):
47
+ r"""
48
+ This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a
49
+ MobileViT model according to the specified arguments, defining the model architecture. Instantiating a
50
+ configuration with the defaults will yield a similar configuration to that of the MobileViT
51
+ [apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture.
52
+
53
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
54
+ documentation from [`PretrainedConfig`] for more information.
55
+
56
+ Args:
57
+ num_channels (`int`, *optional*, defaults to 3):
58
+ The number of input channels.
59
+ image_size (`int`, *optional*, defaults to 256):
60
+ The size (resolution) of each image.
61
+ patch_size (`int`, *optional*, defaults to 2):
62
+ The size (resolution) of each patch.
63
+ hidden_sizes (`List[int]`, *optional*, defaults to `[144, 192, 240]`):
64
+ Dimensionality (hidden size) of the Transformer encoders at each stage.
65
+ neck_hidden_sizes (`List[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`):
66
+ The number of channels for the feature maps of the backbone.
67
+ num_attention_heads (`int`, *optional*, defaults to 4):
68
+ Number of attention heads for each attention layer in the Transformer encoder.
69
+ mlp_ratio (`float`, *optional*, defaults to 2.0):
70
+ The ratio of the number of channels in the output of the MLP to the number of channels in the input.
71
+ expand_ratio (`float`, *optional*, defaults to 4.0):
72
+ Expansion factor for the MobileNetv2 layers.
73
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
74
+ The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
75
+ conv_kernel_size (`int`, *optional*, defaults to 3):
76
+ The size of the convolutional kernel in the MobileViT layer.
77
+ output_stride (`int`, *optional*, defaults to 32):
78
+ The ratio of the spatial resolution of the output to the resolution of the input image.
79
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
80
+ The dropout probabilitiy for all fully connected layers in the Transformer encoder.
81
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
82
+ The dropout ratio for the attention probabilities.
83
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
84
+ The dropout ratio for attached classifiers.
85
+ initializer_range (`float`, *optional*, defaults to 0.02):
86
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
87
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
88
+ The epsilon used by the layer normalization layers.
89
+ qkv_bias (`bool`, *optional*, defaults to `True`):
90
+ Whether to add a bias to the queries, keys and values.
91
+ aspp_out_channels (`int`, *optional*, defaults to 256):
92
+ Number of output channels used in the ASPP layer for semantic segmentation.
93
+ atrous_rates (`List[int]`, *optional*, defaults to `[6, 12, 18]`):
94
+ Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
95
+ aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
96
+ The dropout ratio for the ASPP layer for semantic segmentation.
97
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
98
+ The index that is ignored by the loss function of the semantic segmentation model.
99
+
100
+ Example:
101
+
102
+ ```python
103
+ >>> from transformers import MobileViTConfig, MobileViTModel
104
+
105
+ >>> # Initializing a mobilevit-small style configuration
106
+ >>> configuration = MobileViTConfig()
107
+
108
+ >>> # Initializing a model from the mobilevit-small style configuration
109
+ >>> model = MobileViTModel(configuration)
110
+
111
+ >>> # Accessing the model configuration
112
+ >>> configuration = model.config
113
+ ```"""
114
+
115
+ model_type = "mobilevit"
116
+
117
+ def __init__(
118
+ self,
119
+ num_channels=3,
120
+ image_size=256,
121
+ patch_size=2,
122
+ hidden_sizes=[144, 192, 240],
123
+ neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640],
124
+ num_attention_heads=4,
125
+ mlp_ratio=2.0,
126
+ expand_ratio=4.0,
127
+ hidden_act="silu",
128
+ conv_kernel_size=3,
129
+ output_stride=32,
130
+ hidden_dropout_prob=0.1,
131
+ attention_probs_dropout_prob=0.0,
132
+ classifier_dropout_prob=0.1,
133
+ initializer_range=0.02,
134
+ layer_norm_eps=1e-5,
135
+ qkv_bias=True,
136
+ aspp_out_channels=256,
137
+ atrous_rates=[6, 12, 18],
138
+ aspp_dropout_prob=0.1,
139
+ semantic_loss_ignore_index=255,
140
+ **kwargs,
141
+ ):
142
+ super().__init__(**kwargs)
143
+
144
+ self.num_channels = num_channels
145
+ self.image_size = image_size
146
+ self.patch_size = patch_size
147
+ self.hidden_sizes = hidden_sizes
148
+ self.neck_hidden_sizes = neck_hidden_sizes
149
+ self.num_attention_heads = num_attention_heads
150
+ self.mlp_ratio = mlp_ratio
151
+ self.expand_ratio = expand_ratio
152
+ self.hidden_act = hidden_act
153
+ self.conv_kernel_size = conv_kernel_size
154
+ self.output_stride = output_stride
155
+ self.hidden_dropout_prob = hidden_dropout_prob
156
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
157
+ self.classifier_dropout_prob = classifier_dropout_prob
158
+ self.initializer_range = initializer_range
159
+ self.layer_norm_eps = layer_norm_eps
160
+ self.qkv_bias = qkv_bias
161
+
162
+ # decode head attributes for semantic segmentation
163
+ self.aspp_out_channels = aspp_out_channels
164
+ self.atrous_rates = atrous_rates
165
+ self.aspp_dropout_prob = aspp_dropout_prob
166
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
167
+
168
+
169
+ class MobileViTOnnxConfig(OnnxConfig):
170
+ torch_onnx_minimum_version = version.parse("1.11")
171
+
172
+ @property
173
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
174
+ return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])
175
+
176
+ @property
177
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
178
+ if self.task == "image-classification":
179
+ return OrderedDict([("logits", {0: "batch"})])
180
+ else:
181
+ return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
182
+
183
+ @property
184
+ def atol_for_validation(self) -> float:
185
+ return 1e-4
evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py ADDED
@@ -0,0 +1,312 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MobileViT checkpoints from the ml-cvnets library."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+
27
+ from transformers import (
28
+ MobileViTConfig,
29
+ MobileViTForImageClassification,
30
+ MobileViTForSemanticSegmentation,
31
+ MobileViTImageProcessor,
32
+ )
33
+ from transformers.utils import logging
34
+
35
+
36
+ logging.set_verbosity_info()
37
+ logger = logging.get_logger(__name__)
38
+
39
+
40
+ def get_mobilevit_config(mobilevit_name):
41
+ config = MobileViTConfig()
42
+
43
+ # size of the architecture
44
+ if "mobilevit_s" in mobilevit_name:
45
+ config.hidden_sizes = [144, 192, 240]
46
+ config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
47
+ elif "mobilevit_xs" in mobilevit_name:
48
+ config.hidden_sizes = [96, 120, 144]
49
+ config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
50
+ elif "mobilevit_xxs" in mobilevit_name:
51
+ config.hidden_sizes = [64, 80, 96]
52
+ config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
53
+ config.hidden_dropout_prob = 0.05
54
+ config.expand_ratio = 2.0
55
+
56
+ if mobilevit_name.startswith("deeplabv3_"):
57
+ config.image_size = 512
58
+ config.output_stride = 16
59
+ config.num_labels = 21
60
+ filename = "pascal-voc-id2label.json"
61
+ else:
62
+ config.num_labels = 1000
63
+ filename = "imagenet-1k-id2label.json"
64
+
65
+ repo_id = "huggingface/label-files"
66
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
67
+ id2label = {int(k): v for k, v in id2label.items()}
68
+ config.id2label = id2label
69
+ config.label2id = {v: k for k, v in id2label.items()}
70
+
71
+ return config
72
+
73
+
74
+ def rename_key(name, base_model=False):
75
+ for i in range(1, 6):
76
+ if f"layer_{i}." in name:
77
+ name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
78
+
79
+ if "conv_1." in name:
80
+ name = name.replace("conv_1.", "conv_stem.")
81
+ if ".block." in name:
82
+ name = name.replace(".block.", ".")
83
+
84
+ if "exp_1x1" in name:
85
+ name = name.replace("exp_1x1", "expand_1x1")
86
+ if "red_1x1" in name:
87
+ name = name.replace("red_1x1", "reduce_1x1")
88
+ if ".local_rep.conv_3x3." in name:
89
+ name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
90
+ if ".local_rep.conv_1x1." in name:
91
+ name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
92
+ if ".norm." in name:
93
+ name = name.replace(".norm.", ".normalization.")
94
+ if ".conv." in name:
95
+ name = name.replace(".conv.", ".convolution.")
96
+ if ".conv_proj." in name:
97
+ name = name.replace(".conv_proj.", ".conv_projection.")
98
+
99
+ for i in range(0, 2):
100
+ for j in range(0, 4):
101
+ if f".{i}.{j}." in name:
102
+ name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
103
+
104
+ for i in range(2, 6):
105
+ for j in range(0, 4):
106
+ if f".{i}.{j}." in name:
107
+ name = name.replace(f".{i}.{j}.", f".{i}.")
108
+ if "expand_1x1" in name:
109
+ name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
110
+ if "conv_3x3" in name:
111
+ name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
112
+ if "reduce_1x1" in name:
113
+ name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
114
+
115
+ for i in range(2, 5):
116
+ if f".global_rep.{i}.weight" in name:
117
+ name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
118
+ if f".global_rep.{i}.bias" in name:
119
+ name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
120
+
121
+ if ".global_rep." in name:
122
+ name = name.replace(".global_rep.", ".transformer.")
123
+ if ".pre_norm_mha.0." in name:
124
+ name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
125
+ if ".pre_norm_mha.1.out_proj." in name:
126
+ name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
127
+ if ".pre_norm_ffn.0." in name:
128
+ name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
129
+ if ".pre_norm_ffn.1." in name:
130
+ name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
131
+ if ".pre_norm_ffn.4." in name:
132
+ name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
133
+ if ".transformer." in name:
134
+ name = name.replace(".transformer.", ".transformer.layer.")
135
+
136
+ if ".aspp_layer." in name:
137
+ name = name.replace(".aspp_layer.", ".")
138
+ if ".aspp_pool." in name:
139
+ name = name.replace(".aspp_pool.", ".")
140
+ if "seg_head." in name:
141
+ name = name.replace("seg_head.", "segmentation_head.")
142
+ if "segmentation_head.classifier.classifier." in name:
143
+ name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
144
+
145
+ if "classifier.fc." in name:
146
+ name = name.replace("classifier.fc.", "classifier.")
147
+ elif (not base_model) and ("segmentation_head." not in name):
148
+ name = "mobilevit." + name
149
+
150
+ return name
151
+
152
+
153
+ def convert_state_dict(orig_state_dict, model, base_model=False):
154
+ if base_model:
155
+ model_prefix = ""
156
+ else:
157
+ model_prefix = "mobilevit."
158
+
159
+ for key in orig_state_dict.copy().keys():
160
+ val = orig_state_dict.pop(key)
161
+
162
+ if key[:8] == "encoder.":
163
+ key = key[8:]
164
+
165
+ if "qkv" in key:
166
+ key_split = key.split(".")
167
+ layer_num = int(key_split[0][6:]) - 1
168
+ transformer_num = int(key_split[3])
169
+ layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
170
+ dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
171
+ prefix = (
172
+ f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
173
+ )
174
+ if "weight" in key:
175
+ orig_state_dict[prefix + "query.weight"] = val[:dim, :]
176
+ orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
177
+ orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
178
+ else:
179
+ orig_state_dict[prefix + "query.bias"] = val[:dim]
180
+ orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
181
+ orig_state_dict[prefix + "value.bias"] = val[-dim:]
182
+ else:
183
+ orig_state_dict[rename_key(key, base_model)] = val
184
+
185
+ return orig_state_dict
186
+
187
+
188
+ # We will verify our results on an image of cute cats
189
+ def prepare_img():
190
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
191
+ im = Image.open(requests.get(url, stream=True).raw)
192
+ return im
193
+
194
+
195
+ @torch.no_grad()
196
+ def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
197
+ """
198
+ Copy/paste/tweak model's weights to our MobileViT structure.
199
+ """
200
+ config = get_mobilevit_config(mobilevit_name)
201
+
202
+ # load original state_dict
203
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
204
+
205
+ # load 🤗 model
206
+ if mobilevit_name.startswith("deeplabv3_"):
207
+ model = MobileViTForSemanticSegmentation(config).eval()
208
+ else:
209
+ model = MobileViTForImageClassification(config).eval()
210
+
211
+ new_state_dict = convert_state_dict(state_dict, model)
212
+ model.load_state_dict(new_state_dict)
213
+
214
+ # Check outputs on an image, prepared by MobileViTImageProcessor
215
+ image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
216
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
217
+ outputs = model(**encoding)
218
+ logits = outputs.logits
219
+
220
+ if mobilevit_name.startswith("deeplabv3_"):
221
+ assert logits.shape == (1, 21, 32, 32)
222
+
223
+ if mobilevit_name == "deeplabv3_mobilevit_s":
224
+ expected_logits = torch.tensor(
225
+ [
226
+ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
227
+ [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
228
+ [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
229
+ ]
230
+ )
231
+ elif mobilevit_name == "deeplabv3_mobilevit_xs":
232
+ expected_logits = torch.tensor(
233
+ [
234
+ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
235
+ [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
236
+ [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
237
+ ]
238
+ )
239
+ elif mobilevit_name == "deeplabv3_mobilevit_xxs":
240
+ expected_logits = torch.tensor(
241
+ [
242
+ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
243
+ [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
244
+ [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
245
+ ]
246
+ )
247
+ else:
248
+ raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
249
+
250
+ assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
251
+ else:
252
+ assert logits.shape == (1, 1000)
253
+
254
+ if mobilevit_name == "mobilevit_s":
255
+ expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
256
+ elif mobilevit_name == "mobilevit_xs":
257
+ expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
258
+ elif mobilevit_name == "mobilevit_xxs":
259
+ expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
260
+ else:
261
+ raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
262
+
263
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
264
+
265
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
266
+ print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
267
+ model.save_pretrained(pytorch_dump_folder_path)
268
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
269
+ image_processor.save_pretrained(pytorch_dump_folder_path)
270
+
271
+ if push_to_hub:
272
+ model_mapping = {
273
+ "mobilevit_s": "mobilevit-small",
274
+ "mobilevit_xs": "mobilevit-x-small",
275
+ "mobilevit_xxs": "mobilevit-xx-small",
276
+ "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
277
+ "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
278
+ "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
279
+ }
280
+
281
+ print("Pushing to the hub...")
282
+ model_name = model_mapping[mobilevit_name]
283
+ image_processor.push_to_hub(model_name, organization="apple")
284
+ model.push_to_hub(model_name, organization="apple")
285
+
286
+
287
+ if __name__ == "__main__":
288
+ parser = argparse.ArgumentParser()
289
+ # Required parameters
290
+ parser.add_argument(
291
+ "--mobilevit_name",
292
+ default="mobilevit_s",
293
+ type=str,
294
+ help=(
295
+ "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
296
+ " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
297
+ ),
298
+ )
299
+ parser.add_argument(
300
+ "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
301
+ )
302
+ parser.add_argument(
303
+ "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
304
+ )
305
+ parser.add_argument(
306
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
307
+ )
308
+
309
+ args = parser.parse_args()
310
+ convert_movilevit_checkpoint(
311
+ args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
312
+ )
evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/feature_extraction_mobilevit.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for MobileViT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_mobilevit import MobileViTImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias of `MobileViTImageProcessor`, kept only for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn on every instantiation; behavior is otherwise identical to the parent class.
        deprecation_message = (
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead."
        )
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
evalkit_tf437/lib/python3.10/site-packages/transformers/models/mobilevit/image_processing_mobilevit.py ADDED
@@ -0,0 +1,470 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for MobileViT."""
16
+
17
+ from typing import Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import flip_channel_order, get_resize_output_image_size, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ ChannelDimension,
25
+ ImageInput,
26
+ PILImageResampling,
27
+ infer_channel_dimension_format,
28
+ is_scaled_image,
29
+ make_list_of_images,
30
+ to_numpy_array,
31
+ valid_images,
32
+ )
33
+ from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
34
+
35
+
36
+ if is_vision_available():
37
+ import PIL
38
+
39
+ if is_torch_available():
40
+ import torch
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+
46
class MobileViTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a MobileViT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
            Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
            `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
            in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
            image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
            the `preprocess` method.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Desired output size `(size["height"], size["width"])` when applying center-cropping. Can be overridden by
            the `crop_size` parameter in the `preprocess` method.
        do_flip_channel_order (`bool`, *optional*, defaults to `True`):
            Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
            parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Normalize the size specs once at construction time: `size` defaults to a
        # non-square shortest-edge spec, `crop_size` to an explicit 256x256 square.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    # Copied from transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.resize with PILImageResampling.BICUBIC->PILImageResampling.BILINEAR
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        # A shortest-edge spec preserves aspect ratio (int size, non-square); an
        # explicit height/width spec forces the exact output dimensions.
        default_to_square = True
        if "shortest_edge" in size:
            size = size["shortest_edge"]
            default_to_square = False
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")

        output_size = get_resize_output_image_size(
            image,
            size=size,
            default_to_square=default_to_square,
            input_data_format=input_data_format,
        )
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def flip_channel_order(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
        Flip the color channels from RGB to BGR or vice versa.

        Args:
            image (`np.ndarray`):
                The image, represented as a numpy array.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        # Thin wrapper around the shared image transform so subclasses can override it.
        return flip_channel_order(image, data_format=data_format, input_data_format=input_data_format)

    def __call__(self, images, segmentation_maps=None, **kwargs):
        """
        Preprocesses a batch of images and optionally segmentation maps.

        Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
        passed in as positional arguments.
        """
        return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)

    def _preprocess(
        self,
        image: ImageInput,
        do_resize: bool,
        do_rescale: bool,
        do_center_crop: bool,
        do_flip_channel_order: bool,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        rescale_factor: Optional[float] = None,
        crop_size: Optional[Dict[str, int]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Apply the shared transform pipeline (resize -> rescale -> center-crop -> channel flip) to one image."""
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_center_crop:
            image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

        if do_flip_channel_order:
            image = self.flip_channel_order(image, input_data_format=input_data_format)

        return image

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        # Guard against double rescaling: inputs already in [0, 1] would be divided by 255 again.
        if is_scaled_image(image) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(image)

        image = self._preprocess(
            image=image,
            do_resize=do_resize,
            size=size,
            resample=resample,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_flip_channel_order=do_flip_channel_order,
            input_data_format=input_data_format,
        )

        # Convert to the requested output channel layout (e.g. channels-first for PyTorch).
        image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)

        return image

    def _preprocess_mask(
        self,
        segmentation_map: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Preprocesses a single mask."""
        segmentation_map = to_numpy_array(segmentation_map)
        # Add channel dimension if missing - needed for certain transformations
        if segmentation_map.ndim == 2:
            added_channel_dim = True
            segmentation_map = segmentation_map[None, ...]
            input_data_format = ChannelDimension.FIRST
        else:
            added_channel_dim = False
            if input_data_format is None:
                input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)

        # Masks hold class ids, so use nearest-neighbor resampling and skip
        # rescaling / channel flipping, which only make sense for pixel images.
        segmentation_map = self._preprocess(
            image=segmentation_map,
            do_resize=do_resize,
            size=size,
            resample=PILImageResampling.NEAREST,
            do_rescale=False,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_flip_channel_order=False,
            input_data_format=input_data_format,
        )
        # Remove extra channel dimension if added for processing
        if added_channel_dim:
            segmentation_map = segmentation_map.squeeze(0)
        segmentation_map = segmentation_map.astype(np.int64)
        return segmentation_map

    def preprocess(
        self,
        images: ImageInput,
        segmentation_maps: Optional[ImageInput] = None,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            segmentation_maps (`ImageInput`, *optional*):
                Segmentation map to preprocess.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image by rescale factor.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop if `do_center_crop` is set to `True`.
            do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
                Whether to flip the channel order of the image.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        # Fall back to the instance-level defaults for any parameter the caller left unset.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)
        if segmentation_maps is not None:
            segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if segmentation_maps is not None and not valid_images(segmentation_maps):
            raise ValueError(
                "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Validate that every enabled step has the parameters it needs.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        images = [
            self._preprocess_image(
                image=img,
                do_resize=do_resize,
                size=size,
                resample=resample,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_center_crop=do_center_crop,
                crop_size=crop_size,
                do_flip_channel_order=do_flip_channel_order,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for img in images
        ]

        data = {"pixel_values": images}

        if segmentation_maps is not None:
            segmentation_maps = [
                self._preprocess_mask(
                    segmentation_map=segmentation_map,
                    do_resize=do_resize,
                    size=size,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    input_data_format=input_data_format,
                )
                for segmentation_map in segmentation_maps
            ]

            data["labels"] = segmentation_maps

        return BatchFeature(data=data, tensor_type=return_tensors)

    # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileViT
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """
        Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.

        Args:
            outputs ([`MobileViTForSemanticSegmentation`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
                List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
                predictions will not be resized.

        Returns:
            semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
        """
        # TODO: add support for other frameworks
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            # Upsample each logit map to its requested size before taking the argmax over classes.
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation