Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- janus/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py +26 -0
- janus/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__init__.py +34 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/configuration_clip.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_tf_clip.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/configuration_clip.py +422 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py +36 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py +348 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/modeling_clip.py +1689 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/modeling_flax_clip.py +1306 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/modeling_tf_clip.py +1460 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/processing_clip.py +156 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py +519 -0
- janus/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip_fast.py +164 -0
- janus/lib/python3.10/site-packages/transformers/models/code_llama/tokenization_code_llama_fast.py +381 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__init__.py +30 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/configuration_deberta_v2.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_deberta_v2.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_tf_deberta_v2.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2_fast.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/configuration_deberta_v2.py +198 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py +1519 -0
- janus/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py +223 -0
- janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__init__.py +27 -0
- janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/configuration_dinov2_with_registers.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/modeling_dinov2_with_registers.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/modular_dinov2_with_registers.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +159 -0
- janus/lib/python3.10/site-packages/transformers/models/focalnet/__init__.py +27 -0
- janus/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/configuration_focalnet.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/modeling_focalnet.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/focalnet/configuration_focalnet.py +164 -0
- janus/lib/python3.10/site-packages/transformers/models/focalnet/modeling_focalnet.py +1038 -0
- janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__init__.py +83 -0
- janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/configuration_instructblipvideo.py +342 -0
- janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/modeling_instructblipvideo.py +1670 -0
- janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/modular_instructblipvideo.py +483 -0
- janus/lib/python3.10/site-packages/transformers/models/pixtral/__init__.py +30 -0
- janus/lib/python3.10/site-packages/transformers/models/pixtral/__pycache__/image_processing_pixtral_fast.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/transformers/models/pixtral/image_processing_pixtral_fast.py +355 -0
janus/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py
ADDED
@@ -0,0 +1,26 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+    from .tokenization_bartpho import *
+else:
+    import sys
+
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
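The `else` branch replaces the package module in `sys.modules` with a `_LazyModule` proxy, so importing `transformers.models.bartpho` stays cheap and the tokenizer module is only loaded when one of its attributes is first touched. A minimal sketch of that idea (a deliberate simplification for illustration, not the actual `_LazyModule` implementation):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Simplified stand-in: defer submodule imports until first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"tokenization_bartpho": ["BartphoTokenizer"]}
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only called when `attr` is not already set on the module: import the
        # owning submodule, pull the attribute out, and cache it.
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)
        return value
```

The `TYPE_CHECKING` branch gives static analyzers the eager star-imports they need, while runtime users pay only for what they actually use.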
janus/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (511 Bytes)
janus/lib/python3.10/site-packages/transformers/models/clip/__init__.py
ADDED
@@ -0,0 +1,34 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+    from .configuration_clip import *
+    from .feature_extraction_clip import *
+    from .image_processing_clip import *
+    from .modeling_clip import *
+    from .modeling_flax_clip import *
+    from .modeling_tf_clip import *
+    from .processing_clip import *
+    from .tokenization_clip import *
+    from .tokenization_clip_fast import *
+else:
+    import sys
+
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/configuration_clip.cpython-310.pyc
ADDED
Binary file (15.4 kB)

janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc
ADDED
Binary file (12.9 kB)

janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc
ADDED
Binary file (50.8 kB)

janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc
ADDED
Binary file (38.6 kB)

janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_tf_clip.cpython-310.pyc
ADDED
Binary file (44.3 kB)

janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc
ADDED
Binary file (6.5 kB)

janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip.cpython-310.pyc
ADDED
Binary file (17.6 kB)

janus/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc
ADDED
Binary file (5.91 kB)
janus/lib/python3.10/site-packages/transformers/models/clip/configuration_clip.py
ADDED
@@ -0,0 +1,422 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""CLIP model configuration"""
+
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, Mapping, Optional
+
+
+if TYPE_CHECKING:
+    from ...processing_utils import ProcessorMixin
+    from ...utils import TensorType
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class CLIPTextConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP
+    text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the text encoder of the CLIP
+    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 49408):
+            Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by
+            the `inputs_ids` passed when calling [`CLIPModel`].
+        hidden_size (`int`, *optional*, defaults to 512):
+            Dimensionality of the encoder layers and the pooler layer.
+        intermediate_size (`int`, *optional*, defaults to 2048):
+            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        projection_dim (`int`, *optional*, defaults to 512):
+            Dimensionality of text and vision projection layers.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 8):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        max_position_embeddings (`int`, *optional*, defaults to 77):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the layer normalization layers.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        initializer_factor (`float`, *optional*, defaults to 1.0):
+            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+            testing).
+        pad_token_id (`int`, *optional*, defaults to 1):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 49406):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 49407):
+            End of stream token id.
+
+    Example:
+
+    ```python
+    >>> from transformers import CLIPTextConfig, CLIPTextModel
+
+    >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration
+    >>> configuration = CLIPTextConfig()
+
+    >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
+    >>> model = CLIPTextModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "clip_text_model"
+    base_config_key = "text_config"
+
+    def __init__(
+        self,
+        vocab_size=49408,
+        hidden_size=512,
+        intermediate_size=2048,
+        projection_dim=512,
+        num_hidden_layers=12,
+        num_attention_heads=8,
+        max_position_embeddings=77,
+        hidden_act="quick_gelu",
+        layer_norm_eps=1e-5,
+        attention_dropout=0.0,
+        initializer_range=0.02,
+        initializer_factor=1.0,
+        # This differs from `CLIPTokenizer`'s default and from openai/clip
+        # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
+        pad_token_id=1,
+        bos_token_id=49406,
+        eos_token_id=49407,
+        **kwargs,
+    ):
+        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.projection_dim = projection_dim
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.max_position_embeddings = max_position_embeddings
+        self.layer_norm_eps = layer_norm_eps
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.initializer_factor = initializer_factor
+        self.attention_dropout = attention_dropout
+
+
+class CLIPVisionConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a
+    CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP
+    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        projection_dim (`int`, *optional*, defaults to 512):
+            Dimensionality of text and vision projection layers.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_channels (`int`, *optional*, defaults to 3):
+            The number of input channels.
+        image_size (`int`, *optional*, defaults to 224):
+            The size (resolution) of each image.
+        patch_size (`int`, *optional*, defaults to 32):
+            The size (resolution) of each patch.
+        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the layer normalization layers.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        initializer_factor (`float`, *optional*, defaults to 1.0):
+            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+            testing).
+
+    Example:
+
+    ```python
+    >>> from transformers import CLIPVisionConfig, CLIPVisionModel
+
+    >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
+    >>> configuration = CLIPVisionConfig()
+
+    >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
+    >>> model = CLIPVisionModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "clip_vision_model"
+    base_config_key = "vision_config"
+
+    def __init__(
+        self,
+        hidden_size=768,
+        intermediate_size=3072,
+        projection_dim=512,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        num_channels=3,
+        image_size=224,
+        patch_size=32,
+        hidden_act="quick_gelu",
+        layer_norm_eps=1e-5,
+        attention_dropout=0.0,
+        initializer_range=0.02,
+        initializer_factor=1.0,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.projection_dim = projection_dim
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_channels = num_channels
+        self.patch_size = patch_size
+        self.image_size = image_size
+        self.initializer_range = initializer_range
+        self.initializer_factor = initializer_factor
+        self.attention_dropout = attention_dropout
+        self.layer_norm_eps = layer_norm_eps
+        self.hidden_act = hidden_act
+
+
+class CLIPConfig(PretrainedConfig):
+    r"""
+    [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate
+    a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
+    a configuration with the defaults will yield a similar configuration to that of the CLIP
+    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        text_config (`dict`, *optional*):
+            Dictionary of configuration options used to initialize [`CLIPTextConfig`].
+        vision_config (`dict`, *optional*):
+            Dictionary of configuration options used to initialize [`CLIPVisionConfig`].
+        projection_dim (`int`, *optional*, defaults to 512):
+            Dimensionality of text and vision projection layers.
+        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+            The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
+        kwargs (*optional*):
+            Dictionary of keyword arguments.
+
+    Example:
+
+    ```python
+    >>> from transformers import CLIPConfig, CLIPModel
+
+    >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
+    >>> configuration = CLIPConfig()
+
+    >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
+    >>> model = CLIPModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+
+    >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
+    >>> from transformers import CLIPTextConfig, CLIPVisionConfig
+
+    >>> # Initializing a CLIPText and CLIPVision configuration
+    >>> config_text = CLIPTextConfig()
+    >>> config_vision = CLIPVisionConfig()
+
+    >>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)
+    ```"""
+
+    model_type = "clip"
+    sub_configs = {"text_config": CLIPTextConfig, "vision_config": CLIPVisionConfig}
+
+    def __init__(
+        self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
+    ):
+        # If `_config_dict` exist, we use them for the backward compatibility.
+        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
+        # of confusion!).
+        text_config_dict = kwargs.pop("text_config_dict", None)
+        vision_config_dict = kwargs.pop("vision_config_dict", None)
+
+        super().__init__(**kwargs)
+
+        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
+        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
+        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
+        if text_config_dict is not None:
+            if text_config is None:
+                text_config = {}
+
+            # This is the complete result when using `text_config_dict`.
+            _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict()
+
+            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
+            for key, value in _text_config_dict.items():
+                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+                    # If specified in `text_config_dict`
+                    if key in text_config_dict:
+                        message = (
+                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
+                            f'The value `text_config_dict["{key}"]` will be used instead.'
+                        )
+                    # If inferred from default argument values (just to be super careful)
+                    else:
+                        message = (
+                            f"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The "
+                            f'value `text_config["{key}"]` will be overridden.'
+                        )
+                    logger.info(message)
+
+            # Update all values in `text_config` with the ones in `_text_config_dict`.
+            text_config.update(_text_config_dict)
+
+        if vision_config_dict is not None:
+            if vision_config is None:
+                vision_config = {}
+
+            # This is the complete result when using `vision_config_dict`.
+            _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict()
+            # convert keys to string instead of integer
+            if "id2label" in _vision_config_dict:
+                _vision_config_dict["id2label"] = {
+                    str(key): value for key, value in _vision_config_dict["id2label"].items()
+                }
+
+            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
+            for key, value in _vision_config_dict.items():
+                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+                    # If specified in `vision_config_dict`
+                    if key in vision_config_dict:
+                        message = (
+                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
+                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
+                        )
+                    # If inferred from default argument values (just to be super careful)
+                    else:
+                        message = (
+                            f"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. "
+                            f'The value `vision_config["{key}"]` will be overridden.'
+                        )
+                    logger.info(message)
+
+            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
+            vision_config.update(_vision_config_dict)
+
+        if text_config is None:
+            text_config = {}
+            logger.info("`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.")
+
+        if vision_config is None:
+            vision_config = {}
+            logger.info("`vision_config` is `None`. initializing the `CLIPVisionConfig` with default values.")
+
+        self.text_config = CLIPTextConfig(**text_config)
+        self.vision_config = CLIPVisionConfig(**vision_config)
+
+        self.projection_dim = projection_dim
+        self.logit_scale_init_value = logit_scale_init_value
+        self.initializer_factor = 1.0
+
+    @classmethod
+    def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):
+        r"""
+        Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model
+        configuration.
+
+        Returns:
+            [`CLIPConfig`]: An instance of a configuration object
+        """
+
+        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
+
+
+class CLIPOnnxConfig(OnnxConfig):
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        return OrderedDict(
+            [
+                ("input_ids", {0: "batch", 1: "sequence"}),
+                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+                ("attention_mask", {0: "batch", 1: "sequence"}),
+            ]
+        )
+
+    @property
+    def outputs(self) -> Mapping[str, Mapping[int, str]]:
+        return OrderedDict(
+            [
+                ("logits_per_image", {0: "batch"}),
+                ("logits_per_text", {0: "batch"}),
+                ("text_embeds", {0: "batch"}),
+                ("image_embeds", {0: "batch"}),
+            ]
+        )
+
+    @property
+    def atol_for_validation(self) -> float:
+        return 1e-4
+
+    def generate_dummy_inputs(
+        self,
+        processor: "ProcessorMixin",
+        batch_size: int = -1,
+        seq_length: int = -1,
+        framework: Optional["TensorType"] = None,
+    ) -> Mapping[str, Any]:
+        text_input_dict = super().generate_dummy_inputs(
+            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
+        )
+        image_input_dict = super().generate_dummy_inputs(
+            processor.image_processor, batch_size=batch_size, framework=framework
+        )
+        return {**text_input_dict, **image_input_dict}
+
+    @property
+    def default_onnx_opset(self) -> int:
+        return 14
+
+
+__all__ = ["CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig"]
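The nested-config handling in `CLIPConfig.__init__` is easiest to see in a concrete call. A short sketch of ordinary usage (assuming only that `transformers` is installed; the override values are arbitrary):

```python
from transformers import CLIPConfig

# Sub-configs may be passed as plain dicts; unspecified fields fall back
# to the openai/clip-vit-base-patch32-style defaults.
config = CLIPConfig(
    text_config={"hidden_size": 256, "num_hidden_layers": 4},
    vision_config={"hidden_size": 384, "patch_size": 16},
    projection_dim=256,
)

print(config.text_config.hidden_size)   # 256
print(config.vision_config.patch_size)  # 16
print(config.text_config.vocab_size)    # 49408 (default retained)
```

The legacy `text_config_dict`/`vision_config_dict` kwargs instead take the merge path shown above, logging a message whenever a value they carry overrides one already present in `text_config`/`vision_config`.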
janus/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py
ADDED
@@ -0,0 +1,36 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for CLIP."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_clip import CLIPImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class CLIPFeatureExtractor(CLIPImageProcessor):
+    def __init__(self, *args, **kwargs) -> None:
+        warnings.warn(
+            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+            " use CLIPImageProcessor instead.",
+            FutureWarning,
+        )
+        super().__init__(*args, **kwargs)
+
+
+__all__ = ["CLIPFeatureExtractor"]
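Since this class is a pure deprecation shim, the only observable difference from `CLIPImageProcessor` is the warning. A quick way to confirm that, as a sketch (assumes a transformers install with the vision extras):

```python
import warnings

from transformers import CLIPFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = CLIPFeatureExtractor()  # otherwise behaves exactly like CLIPImageProcessor

print(type(caught[0].message).__name__)  # FutureWarning
```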
janus/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py
ADDED
@@ -0,0 +1,348 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for CLIP."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+    convert_to_rgb,
+    get_resize_output_image_size,
+    resize,
+    to_channel_dimension_format,
+)
+from ...image_utils import (
+    OPENAI_CLIP_MEAN,
+    OPENAI_CLIP_STD,
+    ChannelDimension,
+    ImageInput,
+    PILImageResampling,
+    infer_channel_dimension_format,
+    is_scaled_image,
+    make_list_of_images,
+    to_numpy_array,
+    valid_images,
+    validate_kwargs,
+    validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_vision_available():
+    import PIL
+
+
+class CLIPImageProcessor(BaseImageProcessor):
+    r"""
+    Constructs a CLIP image processor.
+
+    Args:
+        do_resize (`bool`, *optional*, defaults to `True`):
+            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+            `do_resize` in the `preprocess` method.
+        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
+            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
+            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
+            method.
+        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+        do_center_crop (`bool`, *optional*, defaults to `True`):
+            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
+            `preprocess` method.
+        crop_size (`Dict[str, int]` *optional*, defaults to 224):
+            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
+            method.
+        do_rescale (`bool`, *optional*, defaults to `True`):
+            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+            the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+            method.
+        do_normalize (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+        do_convert_rgb (`bool`, *optional*, defaults to `True`):
+            Whether to convert the image to RGB.
+    """
+
+    model_input_names = ["pixel_values"]
+
+    def __init__(
+        self,
+        do_resize: bool = True,
+        size: Dict[str, int] = None,
+        resample: PILImageResampling = PILImageResampling.BICUBIC,
+        do_center_crop: bool = True,
+        crop_size: Dict[str, int] = None,
+        do_rescale: bool = True,
+        rescale_factor: Union[int, float] = 1 / 255,
+        do_normalize: bool = True,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_convert_rgb: bool = True,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+        size = size if size is not None else {"shortest_edge": 224}
+        size = get_size_dict(size, default_to_square=False)
+        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
+
+        self.do_resize = do_resize
+        self.size = size
+        self.resample = resample
+        self.do_center_crop = do_center_crop
+        self.crop_size = crop_size
+        self.do_rescale = do_rescale
+        self.rescale_factor = rescale_factor
+        self.do_normalize = do_normalize
+        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+        self.do_convert_rgb = do_convert_rgb
+        self._valid_processor_keys = [
+            "images",
+            "do_resize",
+            "size",
+            "resample",
+            "do_center_crop",
+            "crop_size",
+            "do_rescale",
+            "rescale_factor",
+            "do_normalize",
+            "image_mean",
+            "image_std",
+            "do_convert_rgb",
+            "return_tensors",
+            "data_format",
+            "input_data_format",
+        ]
+
+        # for backwards compatibility of KOSMOS-2
+        if "use_square_size" in kwargs and kwargs["use_square_size"]:
+            self.size = {"height": size["shortest_edge"], "width": size["shortest_edge"]}
+            # Let's remove `use_square_size` (as it is removed from #27690), so the future Kosmos-2 image processors
+            # won't have this attr. being saved. (otherwise, it will enter this if branch while there is no more
+            # `shortest_edge` key.
+            delattr(self, "use_square_size")
+
+    def resize(
+        self,
+        image: np.ndarray,
+        size: Dict[str, int],
+        resample: PILImageResampling = PILImageResampling.BICUBIC,
+        data_format: Optional[Union[str, ChannelDimension]] = None,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> np.ndarray:
+        """
+        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
+        resized to keep the input aspect ratio.
+
+        Args:
+            image (`np.ndarray`):
+                Image to resize.
+            size (`Dict[str, int]`):
+                Size of the output image.
+            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                Resampling filter to use when resizing the image.
+            data_format (`str` or `ChannelDimension`, *optional*):
+                The channel dimension format of the image. If not provided, it will be the same as the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format of the input image. If not provided, it will be inferred.
+        """
+        default_to_square = True
+        if "shortest_edge" in size:
+            size = size["shortest_edge"]
+            default_to_square = False
+        elif "height" in size and "width" in size:
+            size = (size["height"], size["width"])
+        else:
+            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
+
+        output_size = get_resize_output_image_size(
+            image,
+            size=size,
+            default_to_square=default_to_square,
+            input_data_format=input_data_format,
+        )
+        return resize(
+            image,
+            size=output_size,
+            resample=resample,
+            data_format=data_format,
+            input_data_format=input_data_format,
+            **kwargs,
+        )
+
+    def preprocess(
+        self,
+        images: ImageInput,
+        do_resize: bool = None,
+        size: Dict[str, int] = None,
+        resample: PILImageResampling = None,
+        do_center_crop: bool = None,
+        crop_size: int = None,
+        do_rescale: bool = None,
+        rescale_factor: float = None,
+        do_normalize: bool = None,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_convert_rgb: bool = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> PIL.Image.Image:
+        """
+        Preprocess an image or batch of images.
+
+        Args:
+            images (`ImageInput`):
+                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                Whether to resize the image.
+            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
+                the longest edge resized to keep the input aspect ratio.
+            resample (`int`, *optional*, defaults to `self.resample`):
+                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+                has an effect if `do_resize` is set to `True`.
+            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+                Whether to center crop the image.
+            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image.
+            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                Whether to normalize the image.
+            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
+            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
+                `True`.
+            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+                Whether to convert the image to RGB.
+            return_tensors (`str` or `TensorType`, *optional*):
+                The type of tensors to return. Can be one of:
+                - Unset: Return a list of `np.ndarray`.
+                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                from the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+        """
+        do_resize = do_resize if do_resize is not None else self.do_resize
+        size = size if size is not None else self.size
+        size = get_size_dict(size, param_name="size", default_to_square=False)
+        resample = resample if resample is not None else self.resample
+        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+        crop_size = crop_size if crop_size is not None else self.crop_size
+        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
+        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+        image_mean = image_mean if image_mean is not None else self.image_mean
+        image_std = image_std if image_std is not None else self.image_std
+        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+        images = make_list_of_images(images)
+
+        if not valid_images(images):
+            raise ValueError(
+                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+        validate_preprocess_arguments(
+            do_rescale=do_rescale,
+            rescale_factor=rescale_factor,
+            do_normalize=do_normalize,
+            image_mean=image_mean,
+            image_std=image_std,
+            do_center_crop=do_center_crop,
+            crop_size=crop_size,
+            do_resize=do_resize,
+            size=size,
+            resample=resample,
+        )
+
+        if do_convert_rgb:
+            images = [convert_to_rgb(image) for image in images]
+
+        # All transformations expect numpy arrays.
+        images = [to_numpy_array(image) for image in images]
+
+        if do_rescale and is_scaled_image(images[0]):
+            logger.warning_once(
+                "It looks like you are trying to rescale already rescaled images. If the input"
+                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+            )
+
+        if input_data_format is None:
+            # We assume that all images have the same channel dimension format.
+            input_data_format = infer_channel_dimension_format(images[0])
+
+        all_images = []
+        for image in images:
+            if do_resize:
+                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+            if do_center_crop:
+                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
+
+            if do_rescale:
+                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+            if do_normalize:
+                image = self.normalize(
+                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
+                )
+
+            all_images.append(image)
+        images = [
+            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+            for image in all_images
+        ]
+
+        data = {"pixel_values": images}
+        return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["CLIPImageProcessor"]
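To make the `preprocess` pipeline concrete: with the defaults it resizes the shortest edge to 224, center-crops to 224x224, rescales by 1/255, and normalizes with the OpenAI CLIP statistics. A sketch of typical usage (assumes Pillow and torch are installed; the random image is a stand-in for real data):

```python
import numpy as np
from PIL import Image

from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults: shortest_edge=224 resize, 224x224 crop

# Dummy 640x480 RGB image with uint8 values in [0, 255].
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

batch = processor.preprocess(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```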
janus/lib/python3.10/site-packages/transformers/models/clip/modeling_clip.py
ADDED
|
@@ -0,0 +1,1689 @@
# coding=utf-8
# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CLIP model."""

from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import is_torch_greater_or_equal_than_2_2
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
    torch_int,
)
from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig


if is_flash_attn_2_available():
    from ...modeling_flash_attention_utils import _flash_attention_forward


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "CLIPConfig"
_CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32"

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "openai/clip-vit-base-patch32"
_IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_0"


# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/2021-03-07-clip.html
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0

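For a batch of N aligned text-image pairs, `similarity` above is the N x N logit matrix and the i-th row should peak at column i, so both cross-entropy terms use the labels 0..N-1. A small, self-contained sketch of what `clip_loss` computes (illustrative only, not part of the upstream file):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
similarity = torch.randn(4, 4)  # logits_per_text for a batch of 4 matched pairs

labels = torch.arange(4)
caption_loss = F.cross_entropy(similarity, labels)    # text -> image direction
image_loss = F.cross_entropy(similarity.t(), labels)  # image -> text direction
print((caption_loss + image_loss) / 2.0)               # same value as clip_loss(similarity)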
def _get_vector_norm(tensor: torch.Tensor) -> torch.Tensor:
    """
    This method is equivalent to tensor.norm(p=2, dim=-1, keepdim=True) and used to make
    model `executorch` exportable. See issue https://github.com/pytorch/executorch/issues/3566
    """
    square_tensor = torch.pow(tensor, 2)
    sum_tensor = torch.sum(square_tensor, dim=-1, keepdim=True)
    normed_tensor = torch.pow(sum_tensor, 0.5)
    return normed_tensor

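A quick check of the docstring's equivalence claim (plain PyTorch, illustrative only):

import torch

x = torch.randn(2, 5, 8)
decomposed = torch.pow(torch.sum(torch.pow(x, 2), dim=-1, keepdim=True), 0.5)
print(torch.allclose(decomposed, x.norm(p=2, dim=-1, keepdim=True)))  # True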
@dataclass
class CLIPVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class CLIPTextModelOutput(ModelOutput):
    """
    Base class for text model's outputs that also contains a pooling of the last hidden states.

    Args:
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The text embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class CLIPOutput(ModelOutput):
    """
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`].
        text_model_output (`BaseModelOutputWithPooling`):
            The output of the [`CLIPTextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`CLIPVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    image_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )

class CLIPVisionEmbeddings(nn.Module):
    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size}*{self.image_size})."
            )
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings

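The position bookkeeping above is plain arithmetic; a short sketch using the usual ViT-B/32 configuration values (image_size=224, patch_size=32, which are assumptions, not read from this file):

image_size, patch_size = 224, 32
num_patches = (image_size // patch_size) ** 2   # 7 * 7 = 49 patch tokens
num_positions = num_patches + 1                 # plus the class token -> 50 position embeddings
print(num_patches, num_positions)

# With interpolate_pos_encoding=True, a 320x320 input needs (320 // 32) ** 2 = 100 patch positions,
# which is why the stored 7x7 grid of position embeddings is resized bicubically above.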
class CLIPTextEmbeddings(nn.Module):
    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings

class CLIPAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped

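For intuition, the eager path above computes the standard softmax(Q K^T * scale) V. A small sketch (assuming PyTorch >= 2.1 for the `scale` argument, results equal up to floating-point tolerance) comparing it against `scaled_dot_product_attention`, which the SDPA subclass below delegates to:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
q, k, v = (torch.randn(1, 2, 5, 8) for _ in range(3))  # (batch, heads, seq_len, head_dim)
scale = 8 ** -0.5

eager = torch.softmax((q * scale) @ k.transpose(-1, -2), dim=-1) @ v
sdpa = F.scaled_dot_product_attention(q, k, v, scale=scale)
print(torch.allclose(eager, sdpa, atol=1e-6))  # True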
class CLIPFlashAttention2(CLIPAttention):
    """
    CLIPAttention flash attention module. This module inherits from `CLIPAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    # Adapted from transformers.models.llama.modeling_llama.LlamaFlashAttention2.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        output_attentions = False

        batch_size, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim)
        key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim)
        value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim)

        dropout_rate = self.dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32.

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=dropout_rate,
            is_causal=causal_attention_mask is not None,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights


class CLIPSdpaAttention(CLIPAttention):
    """
    SDPA attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `CLIPAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    # Adapted from CLIPAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "CLIPModel is using CLIPSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not "
                "support `output_attentions=True`. Falling back to the manual attention implementation, but specifying "
                "the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can "
                'be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                causal_attention_mask=causal_attention_mask,
                output_attentions=output_attentions,
            )

        # CLIP text model uses both `causal_attention_mask` and `attention_mask`
        if attention_mask is not None and causal_attention_mask is not None:
            attn_mask = attention_mask + causal_attention_mask
        elif causal_attention_mask is not None:
            attn_mask = causal_attention_mask
        else:
            attn_mask = attention_mask

        bsz, tgt_len, embed_dim = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if not is_torch_greater_or_equal_than_2_2 and query_states.device.type == "cuda" and attn_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # CLIP text model uses both `causal_attention_mask` and `attention_mask` sequentially.
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attn_mask,
            dropout_p=self.dropout if self.training else 0.0,
            scale=self.scale,
        )

        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, None


CLIP_ATTENTION_CLASSES = {
    "eager": CLIPAttention,
    "sdpa": CLIPSdpaAttention,
    "flash_attention_2": CLIPFlashAttention2,
}

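A hedged sketch of how this mapping is selected from user code (standard `from_pretrained` usage; the `flash_attention_2` option additionally requires the flash-attn package and half-precision weights on GPU, and the checkpoint name is only illustrative):

from transformers import CLIPModel

# "eager", "sdpa" or "flash_attention_2" picks the corresponding entry in CLIP_ATTENTION_CLASSES.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", attn_implementation="sdpa")
print(type(model.text_model.encoder.layers[0].self_attn).__name__)  # CLIPSdpaAttention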
class CLIPMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class CLIPEncoderLayer(nn.Module):
    def __init__(self, config: CLIPConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIP_ATTENTION_CLASSES[config._attn_implementation](config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class CLIPPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CLIPConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True
    _supports_sdpa = True
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, CLIPTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, CLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, CLIPAttention):
            factor = self.config.initializer_factor
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, CLIPMLP):
            factor = self.config.initializer_factor
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, CLIPModel):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPVisionModelWithProjection):
            nn.init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPTextModelWithProjection):
            nn.init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPForImageClassification):
            nn.init.normal_(
                module.classifier.weight,
                std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
            )

        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

CLIP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CLIP_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, defaults `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLIP_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, defaults `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

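An end-to-end sketch of the inputs documented above, using the usual processor + model pairing (the checkpoint, image URL and prompts are only illustrative):

import requests
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

outputs = model(**inputs)                       # input_ids, attention_mask and pixel_values as documented above
print(outputs.logits_per_image.softmax(dim=1))  # image-text match probabilities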
class CLIPEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`CLIPEncoderLayer`].

    Args:
        config: CLIPConfig
    """

    def __init__(self, config: CLIPConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )

| 898 |
+
class CLIPTextTransformer(nn.Module):
    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = CLIPTextEmbeddings(config)
        self.encoder = CLIPEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        # For `pooled_output` computation
        self.eos_token_id = config.eos_token_id

        # For attention mask, it differs between `flash_attention_2` and other attention implementations
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # CLIP's text model uses causal mask, prepare it here.
        # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )

        # expand attention_mask
        if attention_mask is not None and not self._use_flash_attention_2:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        if self.eos_token_id == 2:
            # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
            # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
            # ------------------------------------------------------------
            # text_embeds.shape = [batch_size, sequence_length, transformer.width]
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
            ]
        else:
            # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`)
                # Note: we assume each sequence (along batch dim.) contains an `eos_token_id` (e.g. prepared by the tokenizer)
                (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
                .int()
                .argmax(dim=-1),
            ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


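# Worked example of the EOS pooling above (a minimal sketch, assuming the standard CLIP
# vocabulary where "<|endoftext|>" is typically the highest token id, e.g. 49407):
#
#     input_ids = torch.tensor([[49406, 320, 1125, 49407, 0, 0]])
#     # legacy path (config still has eos_token_id == 2): EOS has the largest id,
#     # so argmax over the ids returns its position, here index 3
#     input_ids.to(torch.int).argmax(dim=-1)                      # tensor([3])
#     # updated path: look for the first occurrence of the configured eos_token_id,
#     # which keeps working even after larger new token ids are added to the vocab
#     (input_ids == 49407).int().argmax(dim=-1)                   # tensor([3])
#
# `last_hidden_state[torch.arange(batch), eos_positions]` then gathers one pooled
# vector per sequence.
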
@add_start_docstrings(
    """The text model from CLIP without any head or projection on top.""",
    CLIP_START_DOCSTRING,
)
class CLIPTextModel(CLIPPreTrainedModel):
    config_class = CLIPTextConfig

    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)
        self.text_model = CLIPTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPTextModel

        >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class CLIPVisionTransformer(nn.Module):
    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = CLIPVisionEmbeddings(config)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = CLIPEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """The vision model from CLIP without any head or projection on top.""",
    CLIP_START_DOCSTRING,
)
class CLIPVisionModel(CLIPPreTrainedModel):
    config_class = CLIPVisionConfig
    main_input_name = "pixel_values"
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModel

        >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )


@add_start_docstrings(CLIP_START_DOCSTRING)
class CLIPModel(CLIPPreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer", "CLIPVisionEmbeddings"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        if not isinstance(config.text_config, CLIPTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type CLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, CLIPVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        text_model = CLIPTextModel._from_config(text_config)
        self.text_model = text_model.text_model

        vision_model = CLIPVisionModel._from_config(vision_config)
        self.vision_model = vision_model.vision_model

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]  # pooled_output
        image_features = self.visual_projection(pooled_output)

        return image_features

    @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / _get_vector_norm(image_embeds)
        text_embeds = text_embeds / _get_vector_norm(text_embeds)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device)) * logit_scale.to(
            text_embeds.device
        )
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return CLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


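# The `clip_loss` helper called in `forward` implements CLIP's symmetric contrastive
# objective. Roughly (a sketch, assuming a batch of matched (image, text) pairs so that
# the correct targets lie on the diagonal of the similarity matrix):
#
#     targets = torch.arange(logits_per_text.size(0), device=logits_per_text.device)
#     caption_loss = torch.nn.functional.cross_entropy(logits_per_text, targets)
#     image_loss = torch.nn.functional.cross_entropy(logits_per_text.t(), targets)
#     loss = (caption_loss + image_loss) / 2.0
#
# i.e. each text should score highest against its own image, and vice versa.
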
@add_start_docstrings(
    """
    CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output).
    """,
    CLIP_START_DOCSTRING,
)
class CLIPTextModelWithProjection(CLIPPreTrainedModel):
    config_class = CLIPTextConfig

    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)

        text_model = CLIPTextModel._from_config(config, attn_implementation=config._attn_implementation)
        self.text_model = text_model.text_model

        self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPTextModelOutput, config_class=CLIPTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPTextModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPTextModelWithProjection

        >>> model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> text_embeds = outputs.text_embeds
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = text_outputs[1]

        text_embeds = self.text_projection(pooled_output)

        if not return_dict:
            outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
            return tuple(output for output in outputs if output is not None)

        return CLIPTextModelOutput(
            text_embeds=text_embeds,
            last_hidden_state=text_outputs.last_hidden_state,
            hidden_states=text_outputs.hidden_states,
            attentions=text_outputs.attentions,
        )


@add_start_docstrings(
    """
    CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output).
    """,
    CLIP_START_DOCSTRING,
)
class CLIPVisionModelWithProjection(CLIPPreTrainedModel):
    config_class = CLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)

        vision_model = CLIPVisionModel._from_config(config, attn_implementation=config._attn_implementation)
        self.vision_model = vision_model.vision_model

        self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPVisionModelOutput, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPVisionModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection

        >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> image_embeds = outputs.image_embeds
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]  # pooled_output

        image_embeds = self.visual_projection(pooled_output)

        if not return_dict:
            outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:]
            return tuple(output for output in outputs if output is not None)

        return CLIPVisionModelOutput(
            image_embeds=image_embeds,
            last_hidden_state=vision_outputs.last_hidden_state,
            hidden_states=vision_outputs.hidden_states,
            attentions=vision_outputs.attentions,
        )


@add_start_docstrings(
    """
    CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden
    states of the patch tokens), e.g. for ImageNet.
    """,
    CLIP_START_DOCSTRING,
)
class CLIPForImageClassification(CLIPPreTrainedModel):
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        vision_model = CLIPVisionModel._from_config(config.vision_config)
        self.vision_model = vision_model.vision_model

        # Classifier head
        self.classifier = (
            nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.vision_model(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # average pool the patch tokens
        sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1)
        # apply classifier
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


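# A minimal usage sketch for the classification head above (the label count is a
# hypothetical choice for illustration; the checkpoint name mirrors the docstring
# examples in this file, and the classifier head is randomly initialized here):
#
#     >>> import torch
#     >>> from transformers import CLIPConfig, CLIPForImageClassification
#     >>> config = CLIPConfig.from_pretrained("openai/clip-vit-base-patch32", num_labels=10)
#     >>> model = CLIPForImageClassification(config)
#     >>> pixel_values = torch.randn(1, 3, 224, 224)  # dummy batch of one 224x224 image
#     >>> logits = model(pixel_values=pixel_values).logits  # shape (1, 10)
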
__all__ = [
    "CLIPModel",
    "CLIPPreTrainedModel",
    "CLIPTextModel",
    "CLIPTextModelWithProjection",
    "CLIPVisionModel",
    "CLIPVisionModelWithProjection",
    "CLIPForImageClassification",
]
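For reference, a minimal retrieval sketch built on the public methods shown above; it only uses calls that appear in this file's docstring examples (`image` is obtained exactly as in those examples), and the explicit L2 normalization mirrors what `CLIPModel.forward` does internally:

    import torch
    from transformers import AutoProcessor, AutoTokenizer, CLIPModel

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

    text_inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
    image_inputs = processor(images=image, return_tensors="pt")  # `image` as in the examples above

    with torch.no_grad():
        text_embeds = model.get_text_features(**text_inputs)
        image_embeds = model.get_image_features(**image_inputs)

    # L2-normalize, then compare with a dot product to get cosine similarities
    text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
    image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
    similarity = image_embeds @ text_embeds.t()  # (num_images, num_texts)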
janus/lib/python3.10/site-packages/transformers/models/clip/modeling_flax_clip.py
ADDED
@@ -0,0 +1,1306 @@
# coding=utf-8
# Copyright 2021 The OpenAI Team Authors, The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax

from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling
from ...modeling_flax_utils import (
    ACT2FN,
    FlaxPreTrainedModel,
    append_replace_return_docstrings,
    overwrite_call_docstring,
)
from ...utils import ModelOutput, add_start_docstrings, logging
from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig


logger = logging.get_logger(__name__)

CLIP_START_DOCSTRING = r"""

    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading, saving and converting weights from PyTorch models).

    This model is also a
    [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
    a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
    behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
        dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
            The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
            `jax.numpy.bfloat16` (on TPUs).

            This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
            specified all the computation will be performed with the given `dtype`.

            **Note that this only specifies the dtype of the computation and does not influence the dtype of model
            parameters.**

            If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
            [`~FlaxPreTrainedModel.to_bf16`].
"""

CLIP_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLIP_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

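# The `dtype` argument described in CLIP_START_DOCSTRING only controls the computation
# precision, not the stored parameters. A minimal sketch (assuming the FlaxCLIPModel
# class exported by this module, as in the transformers Flax API):
#
#     >>> import jax.numpy as jnp
#     >>> from transformers import FlaxCLIPModel
#     >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32", dtype=jnp.float16)
#     >>> # parameters stay in float32; cast them separately if desired:
#     >>> params_fp16 = model.to_fp16(model.params)
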
@flax.struct.dataclass
|
| 160 |
+
class FlaxCLIPTextModelOutput(ModelOutput):
|
| 161 |
+
"""
|
| 162 |
+
Base class for text model's outputs that also contains a pooling of the last hidden states.
|
| 163 |
+
|
| 164 |
+
Args:
|
| 165 |
+
text_embeds (`jnp.ndarray` of shape `(batch_size, output_dim`):
|
| 166 |
+
The text embeddings obtained by applying the projection layer to the pooled output of
|
| 167 |
+
[`FlaxCLIPTextModel`].
|
| 168 |
+
last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 169 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 170 |
+
hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 171 |
+
Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
|
| 172 |
+
`(batch_size, sequence_length, hidden_size)`.
|
| 173 |
+
|
| 174 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
| 175 |
+
attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
| 176 |
+
Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
| 177 |
+
sequence_length)`.
|
| 178 |
+
|
| 179 |
+
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
| 180 |
+
heads.
|
| 181 |
+
"""
|
| 182 |
+
|
| 183 |
+
text_embeds: jnp.ndarray = None
|
| 184 |
+
last_hidden_state: jnp.ndarray = None
|
| 185 |
+
hidden_states: Optional[Tuple[jnp.ndarray, ...]] = None
|
| 186 |
+
attentions: Optional[Tuple[jnp.ndarray, ...]] = None
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
@flax.struct.dataclass
|
| 190 |
+
class FlaxCLIPOutput(ModelOutput):
|
| 191 |
+
"""
|
| 192 |
+
Args:
|
| 193 |
+
logits_per_image:(`jnp.ndarray` of shape `(image_batch_size, text_batch_size)`):
|
| 194 |
+
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
|
| 195 |
+
similarity scores.
|
| 196 |
+
logits_per_text:(`jnp.ndarray` of shape `(text_batch_size, image_batch_size)`):
|
| 197 |
+
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
|
| 198 |
+
similarity scores.
|
| 199 |
+
text_embeds(`jnp.ndarray` of shape `(batch_size, output_dim`):
|
| 200 |
+
The text embeddings obtained by applying the projection layer to the pooled output of
|
| 201 |
+
[`FlaxCLIPTextModel`].
|
| 202 |
+
image_embeds(`jnp.ndarray` of shape `(batch_size, output_dim`):
|
| 203 |
+
The image embeddings obtained by applying the projection layer to the pooled output of
|
| 204 |
+
[`FlaxCLIPVisionModel`].
|
| 205 |
+
text_model_output(`FlaxBaseModelOutputWithPooling`):
|
| 206 |
+
The output of the [`FlaxCLIPTextModel`].
|
| 207 |
+
vision_model_output(`FlaxBaseModelOutputWithPooling`):
|
| 208 |
+
The output of the [`FlaxCLIPVisionModel`].
|
| 209 |
+
"""
|
| 210 |
+
|
| 211 |
+
logits_per_image: jnp.ndarray = None
|
| 212 |
+
logits_per_text: jnp.ndarray = None
|
| 213 |
+
text_embeds: jnp.ndarray = None
|
| 214 |
+
image_embeds: jnp.ndarray = None
|
| 215 |
+
text_model_output: FlaxBaseModelOutputWithPooling = None
|
| 216 |
+
vision_model_output: FlaxBaseModelOutputWithPooling = None
|
| 217 |
+
|
| 218 |
+
def to_tuple(self) -> Tuple[Any]:
|
| 219 |
+
return tuple(
|
| 220 |
+
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
|
| 221 |
+
for k in self.keys()
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
class FlaxCLIPVisionEmbeddings(nn.Module):
    config: CLIPVisionConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        embed_dim = self.config.hidden_size
        image_size = self.config.image_size
        patch_size = self.config.patch_size

        self.class_embedding = self.param("class_embedding", jax.nn.initializers.normal(stddev=0.02), (embed_dim,))

        self.patch_embedding = nn.Conv(
            embed_dim,
            kernel_size=(patch_size, patch_size),
            strides=(patch_size, patch_size),
            padding="VALID",
            use_bias=False,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(),
        )

        self.num_patches = (image_size // patch_size) ** 2
        num_positions = self.num_patches + 1
        self.position_embedding = nn.Embed(num_positions, embed_dim, embedding_init=jax.nn.initializers.normal())
        self.position_ids = jnp.expand_dims(jnp.arange(0, num_positions, dtype="i4"), axis=0)

    def __call__(self, pixel_values):
        patch_embeds = self.patch_embedding(pixel_values)
        batch_size, height, width, channels = patch_embeds.shape
        patch_embeds = jnp.reshape(patch_embeds, (batch_size, height * width, channels))

        class_embeds = jnp.expand_dims(self.class_embedding, axis=(0, 1))
        class_embeds = jnp.tile(class_embeds, (batch_size, 1, 1))
        embeddings = jnp.concatenate([class_embeds, patch_embeds], axis=1)
        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings


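# Shape walk-through for the vision embeddings (a sketch, assuming the default
# ViT-B/32 vision config: image_size=224, patch_size=32, hidden_size=768):
#   pixel_values (NHWC): (B, 224, 224, 3) -> patch_embedding -> (B, 7, 7, 768)
#   reshape: (B, 49, 768); prepend class token: (B, 50, 768) = num_patches + 1
#   position embeddings for ids 0..49 are then added element-wise.

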
class FlaxCLIPTextEmbeddings(nn.Module):
    config: CLIPTextConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        embed_dim = self.config.hidden_size

        self.token_embedding = nn.Embed(self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal())
        self.position_embedding = nn.Embed(
            self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal()
        )
        self.position_ids = jnp.expand_dims(
            jnp.arange(0, self.config.max_position_embeddings, dtype="i4"), axis=(0, 1)
        )

    def __call__(self, input_ids, position_ids):
        input_embeds = self.token_embedding(input_ids.astype("i4"))
        position_embeds = self.position_embedding(position_ids.astype("i4"))

        embeddings = input_embeds + position_embeds
        return embeddings


class FlaxCLIPAttention(nn.Module):
    config: Union[CLIPTextConfig, CLIPVisionConfig]
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.embed_dim = self.config.hidden_size
        self.num_heads = self.config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = self.config.attention_dropout

        self.k_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))
        self.v_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))
        self.q_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))
        self.out_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))

        self.causal = isinstance(self.config, CLIPTextConfig)
        if self.causal:
            self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype="i4"))

    def _split_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))

    def __call__(
        self,
        hidden_states,
        attention_mask=None,
        deterministic: bool = True,
        output_attentions: bool = False,
    ):
        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)

        query = self._split_heads(query)
        key = self._split_heads(key)
        value = self._split_heads(value)

        causal_attention_mask = None
        if self.causal:
            query_length, key_length = query.shape[1], key.shape[1]
            causal_attention_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]

        if attention_mask is not None and causal_attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
            attention_mask = combine_masks(attention_mask, causal_attention_mask, dtype="i4")
        elif causal_attention_mask is not None:
            attention_mask = causal_attention_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        if attention_mask is not None:
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
            )
        else:
            attention_bias = None

        dropout_rng = None
        if not deterministic and self.dropout > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query,
            key,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.dropout,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)

        outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
        return outputs


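# Causal masking is applied only when the config is a CLIPTextConfig: CLIP's
# text encoder masks future positions, while the vision tower attends
# bidirectionally. Note that Flax's `dot_product_attention_weights` divides the
# attention logits by sqrt(head_dim) internally, and `self.scale` is not
# referenced again in this module.

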
class FlaxCLIPMLP(nn.Module):
    config: Union[CLIPTextConfig, CLIPVisionConfig]
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.activation_fn = ACT2FN[self.config.hidden_act]
        self.fc1 = nn.Dense(
            self.config.intermediate_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(0.01),
        )
        self.fc2 = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))

    def __call__(self, hidden_states):
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


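# For the stock CLIP configs, `config.hidden_act` is "quick_gelu", i.e. the
# GELU approximation x * sigmoid(1.702 * x); ACT2FN maps the configured name
# to the corresponding JAX callable, so other activations remain selectable.

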
class FlaxCLIPEncoderLayer(nn.Module):
    config: Union[CLIPTextConfig, CLIPVisionConfig]
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.self_attn = FlaxCLIPAttention(self.config, dtype=self.dtype)
        self.layer_norm1 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.mlp = FlaxCLIPMLP(self.config, dtype=self.dtype)
        self.layer_norm2 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
    ):
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        attn_outputs = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        hidden_states = attn_outputs[0]
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += attn_outputs[1:]

        return outputs


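# Each encoder layer is a pre-LayerNorm transformer block: LayerNorm is applied
# *before* the attention and MLP sub-layers, and the residual connection adds
# the un-normalized input back, which tends to stabilize training of deep ViTs.

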
class FlaxCLIPLayerCollection(nn.Module):
    config: Union[CLIPTextConfig, CLIPVisionConfig]
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.layers = [
            FlaxCLIPEncoderLayer(self.config, name=str(i), dtype=self.dtype)
            for i in range(self.config.num_hidden_layers)
        ]

    def __call__(
        self,
        hidden_states,
        attention_mask=None,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = layer(
                hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions += (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        outputs = (hidden_states,)

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )


class FlaxCLIPEncoder(nn.Module):
    config: Union[CLIPTextConfig, CLIPVisionConfig]
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.layers = FlaxCLIPLayerCollection(self.config, dtype=self.dtype)

    def __call__(
        self,
        inputs_embeds,
        attention_mask=None,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return self.layers(
            hidden_states=inputs_embeds,
            attention_mask=attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


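# Registering the encoder layers with explicit names "0", "1", ... keeps the
# Flax parameter tree aligned with the `encoder.layers.<i>` key layout of the
# pretrained CLIP checkpoints, which makes PyTorch-to-Flax weight conversion a
# plain key-for-key mapping.

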
class FlaxCLIPTextTransformer(nn.Module):
    config: CLIPTextConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.embeddings = FlaxCLIPTextEmbeddings(self.config, dtype=self.dtype)
        self.encoder = FlaxCLIPEncoder(self.config, dtype=self.dtype)
        self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

        # For `pooled_output` computation
        self.eos_token_id = self.config.eos_token_id

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        if self.eos_token_id == 2:
            # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
            # A CLIP model with such an `eos_token_id` in the config can't work correctly with extra new tokens added.
            # ------------------------------------------------------------
            # text_embeds.shape = [batch_size, sequence_length, transformer.width]
            # take features from the EOS embedding (eos_token_id is the highest token id in each sequence)
            pooled_output = last_hidden_state[jnp.arange(last_hidden_state.shape[0]), input_ids.argmax(axis=-1)]
        else:
            # (no need to cast from bool to int after comparing to `eos_token_id`)
            pooled_output = last_hidden_state[
                jnp.arange(last_hidden_state.shape[0]), (input_ids == self.eos_token_id).argmax(axis=-1)
            ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return FlaxBaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


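# EOS pooling in a nutshell: the pooled text representation is the hidden state
# at the end-of-sequence token. With the legacy config (eos_token_id == 2, while
# the real EOS id 49407 is the largest id in the vocabulary), `argmax` over the
# raw input ids happens to locate that token; with a corrected config the
# position is found by comparing against `eos_token_id` directly.

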
class FlaxCLIPVisionTransformer(nn.Module):
    config: CLIPVisionConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.embeddings = FlaxCLIPVisionEmbeddings(self.config, dtype=self.dtype)
        self.pre_layrnorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.encoder = FlaxCLIPEncoder(self.config, dtype=self.dtype)
        self.post_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(
        self,
        pixel_values=None,
        deterministic: bool = True,
        output_attentions=None,
        output_hidden_states=None,
        return_dict: bool = True,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return FlaxBaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


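# Note: the misspelled attribute name `pre_layrnorm` is intentional; it matches
# the parameter name baked into the pretrained CLIP checkpoints, so renaming it
# would break weight loading. The vision tower pools by taking the class-token
# position (index 0) of the final hidden states and applying `post_layernorm`.

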
class FlaxCLIPTextPreTrainedModel(FlaxPreTrainedModel):
    config_class = CLIPTextConfig
    module_class: nn.Module = None

    def __init__(
        self,
        config: CLIPTextConfig,
        input_shape=(1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        input_ids = jnp.zeros(input_shape, dtype="i4")
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
        attention_mask = jnp.ones_like(input_ids)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, input_ids, attention_mask, position_ids)["params"]

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    def __call__(
        self,
        input_ids,
        attention_mask=None,
        position_ids=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if position_ids is None:
            position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(position_ids, dtype="i4"),
            not train,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )


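# `init_weights` draws fresh parameters from dummy inputs; when a partial
# checkpoint is supplied, any keys the checkpoint is missing are filled in from
# the randomly initialized tree before the merged parameters are returned.

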
class FlaxCLIPVisionPreTrainedModel(FlaxPreTrainedModel):
    config_class = CLIPVisionConfig
    main_input_name = "pixel_values"
    module_class: nn.Module = None

    def __init__(
        self,
        config: CLIPVisionConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, config.image_size, config.image_size, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        pixel_values = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, pixel_values)["params"]

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    def __call__(
        self,
        pixel_values,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(pixel_values, dtype=jnp.float32),
            not train,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )


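# The transpose above converts image processor output from PyTorch-style NCHW
# (batch, channels, height, width) to the NHWC layout that Flax's `nn.Conv`
# (and therefore `FlaxCLIPVisionEmbeddings`) expects.

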
class FlaxCLIPPreTrainedModel(FlaxPreTrainedModel):
    config_class = CLIPConfig
    module_class: nn.Module = None

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3))
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        input_ids = jnp.zeros(input_shape[0], dtype="i4")
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0])
        attention_mask = jnp.ones_like(input_ids)

        pixel_values = jax.random.normal(rng, input_shape[1])

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids)["params"]

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    def __call__(
        self,
        input_ids,
        pixel_values,
        attention_mask=None,
        position_ids=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if position_ids is None:
            position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(pixel_values, dtype=jnp.float32),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(position_ids, dtype="i4"),
            not train,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )

    def get_text_features(
        self,
        input_ids,
        attention_mask=None,
        position_ids=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train=False,
    ):
        r"""
        Args:
            input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)

        Returns:
            text_features (`jnp.ndarray` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`FlaxCLIPTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, FlaxCLIPModel

        >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        if position_ids is None:
            position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        def _get_features(module, input_ids, attention_mask, position_ids, deterministic):
            text_outputs = module.text_model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                deterministic=deterministic,
            )
            pooled_output = text_outputs[1]
            text_features = module.text_projection(pooled_output)
            return text_features

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(position_ids, dtype="i4"),
            not train,
            method=_get_features,
            rngs=rngs,
        )

    def get_image_features(
        self, pixel_values, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train=False
    ):
        r"""
        Args:
            pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
                Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained
                using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.

        Returns:
            image_features (`jnp.ndarray` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`FlaxCLIPVisionModel`]

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, FlaxCLIPModel

        >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="np")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        def _get_features(module, pixel_values, deterministic):
            vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic)
            pooled_output = vision_outputs[1]  # pooled_output
            image_features = module.visual_projection(pooled_output)
            return image_features

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(pixel_values, dtype=jnp.float32),
            not train,
            method=_get_features,
            rngs=rngs,
        )


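# Both feature extractors above rely on Flax's `module.apply(..., method=fn)`
# escape hatch: `fn` receives the bound module, so it can run just one tower
# plus its projection head without paying for a full forward pass of the other.

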
class FlaxCLIPTextModule(nn.Module):
    config: CLIPTextConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.text_model = FlaxCLIPTextTransformer(self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class FlaxCLIPTextModel(FlaxCLIPTextPreTrainedModel):
    module_class = FlaxCLIPTextModule


FLAX_CLIP_TEXT_MODEL_DOCSTRING = """
    Returns:

    Example:

    ```python
    >>> from transformers import AutoTokenizer, FlaxCLIPTextModel

    >>> model = FlaxCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
    >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

    >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np")

    >>> outputs = model(**inputs)
    >>> last_hidden_state = outputs.last_hidden_state
    >>> pooler_output = outputs.pooler_output  # pooled (EOS token) states
    ```
"""

overwrite_call_docstring(FlaxCLIPTextModel, CLIP_TEXT_INPUTS_DOCSTRING + FLAX_CLIP_TEXT_MODEL_DOCSTRING)
append_replace_return_docstrings(
    FlaxCLIPTextModel, output_type=FlaxBaseModelOutputWithPooling, config_class=CLIPTextConfig
)


class FlaxCLIPTextModelWithProjectionModule(nn.Module):
    config: CLIPTextConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.text_model = FlaxCLIPTextTransformer(self.config, dtype=self.dtype)
        self.text_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = text_outputs[1]
        text_embeds = self.text_projection(pooled_output)

        if not return_dict:
            return (text_embeds, text_outputs[0]) + text_outputs[2:]

        return FlaxCLIPTextModelOutput(
            text_embeds=text_embeds,
            last_hidden_state=text_outputs.last_hidden_state,
            hidden_states=text_outputs.hidden_states,
            attentions=text_outputs.attentions,
        )


class FlaxCLIPTextModelWithProjection(FlaxCLIPTextPreTrainedModel):
    module_class = FlaxCLIPTextModelWithProjectionModule


FLAX_CLIP_TEXT_MODEL_WITH_PROJECTION_DOCSTRING = """
    Returns:

    Example:

    ```python
    >>> from transformers import AutoTokenizer, FlaxCLIPTextModelWithProjection

    >>> model = FlaxCLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
    >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

    >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np")

    >>> outputs = model(**inputs)
    >>> text_embeds = outputs.text_embeds
    ```
"""

overwrite_call_docstring(
    FlaxCLIPTextModelWithProjection, CLIP_TEXT_INPUTS_DOCSTRING + FLAX_CLIP_TEXT_MODEL_WITH_PROJECTION_DOCSTRING
)
append_replace_return_docstrings(
    FlaxCLIPTextModelWithProjection, output_type=FlaxCLIPTextModelOutput, config_class=CLIPTextConfig
)


class FlaxCLIPVisionModule(nn.Module):
    config: CLIPVisionConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionTransformer(self.config, dtype=self.dtype)

    def __call__(
        self,
        pixel_values,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return self.vision_model(
            pixel_values=pixel_values,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class FlaxCLIPVisionModel(FlaxCLIPVisionPreTrainedModel):
    module_class = FlaxCLIPVisionModule


FLAX_CLIP_VISION_MODEL_DOCSTRING = """
    Returns:

    Example:

    ```python
    >>> from PIL import Image
    >>> import requests
    >>> from transformers import AutoProcessor, FlaxCLIPVisionModel

    >>> model = FlaxCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
    >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> inputs = processor(images=image, return_tensors="np")

    >>> outputs = model(**inputs)
    >>> last_hidden_state = outputs.last_hidden_state
    >>> pooler_output = outputs.pooler_output  # pooled CLS states
    ```
"""

overwrite_call_docstring(FlaxCLIPVisionModel, CLIP_VISION_INPUTS_DOCSTRING + FLAX_CLIP_VISION_MODEL_DOCSTRING)
append_replace_return_docstrings(
    FlaxCLIPVisionModel, output_type=FlaxBaseModelOutputWithPooling, config_class=CLIPVisionConfig
)


class FlaxCLIPModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        text_config = self.config.text_config
        vision_config = self.config.vision_config

        self.projection_dim = self.config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = FlaxCLIPTextTransformer(text_config, dtype=self.dtype)
        self.vision_model = FlaxCLIPVisionTransformer(vision_config, dtype=self.dtype)

        self.visual_projection = nn.Dense(
            self.projection_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(0.02),
            use_bias=False,
        )
        self.text_projection = nn.Dense(
            self.projection_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(0.02),
            use_bias=False,
        )

        self.logit_scale = self.param(
            "logit_scale", lambda _, shape: jnp.ones(shape) * self.config.logit_scale_init_value, []
        )

    def __call__(
        self,
        input_ids=None,
        pixel_values=None,
        attention_mask=None,
        position_ids=None,
        deterministic: bool = True,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True)
        text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True)

        # cosine similarity as logits
        logit_scale = jnp.exp(self.logit_scale)
        logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale
        logits_per_image = logits_per_text.T

        if not return_dict:
            return (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)

        return FlaxCLIPOutput(
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


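# The learnable temperature is stored in log space: with the default
# logit_scale_init_value of ~2.6593 (= ln(1/0.07)), exp(logit_scale) starts at
# roughly 14.3, so the L2-normalized cosine similarities in [-1, 1] are scaled
# up before any softmax over the image-text pairs; the scale is learned jointly
# with the rest of the model.

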
@add_start_docstrings(CLIP_START_DOCSTRING)
class FlaxCLIPModel(FlaxCLIPPreTrainedModel):
    module_class = FlaxCLIPModule


FLAX_CLIP_MODEL_DOCSTRING = """
    Returns:

    Example:

    ```python
    >>> import jax
    >>> from PIL import Image
    >>> import requests
    >>> from transformers import AutoProcessor, FlaxCLIPModel

    >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> inputs = processor(
    ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="np", padding=True
    ... )

    >>> outputs = model(**inputs)
    >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
    >>> probs = jax.nn.softmax(logits_per_image, axis=1)  # we can take the softmax to get the label probabilities
    ```
"""

overwrite_call_docstring(FlaxCLIPModel, CLIP_INPUTS_DOCSTRING + FLAX_CLIP_MODEL_DOCSTRING)
append_replace_return_docstrings(FlaxCLIPModel, output_type=FlaxCLIPOutput, config_class=CLIPConfig)


__all__ = [
    "FlaxCLIPModel",
    "FlaxCLIPPreTrainedModel",
    "FlaxCLIPTextModel",
    "FlaxCLIPTextPreTrainedModel",
    "FlaxCLIPTextModelWithProjection",
    "FlaxCLIPVisionModel",
    "FlaxCLIPVisionPreTrainedModel",
]
janus/lib/python3.10/site-packages/transformers/models/clip/modeling_tf_clip.py
ADDED
@@ -0,0 +1,1460 @@
# coding=utf-8
# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 CLIP model."""

from __future__ import annotations

import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling

# Public API
from ...modeling_tf_utils import (
    TFModelInputType,
    TFPreTrainedModel,
    get_initializer,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32"


LARGE_NEGATIVE = -1e8


# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    src_len = shape_list(mask)[1]
    tgt_len = tgt_len if tgt_len is not None else src_len
    one_cst = tf.constant(1.0)
    mask = tf.cast(mask, dtype=one_cst.dtype)
    expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))

    return (one_cst - expanded_mask) * LARGE_NEGATIVE


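# For example, a padding mask [[1, 1, 0]] becomes a (1, 1, 3, 3) additive bias
# whose last column is LARGE_NEGATIVE in every row: adding it to the attention
# logits effectively zeroes out the padded key position after the softmax.

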
# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
    return tf.math.reduce_mean(
        keras.metrics.sparse_categorical_crossentropy(
            y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True
        )
    )


def clip_loss(similarity: tf.Tensor) -> tf.Tensor:
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(tf.transpose(similarity))
    return (caption_loss + image_loss) / 2.0


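# The CLIP objective is a symmetric cross-entropy over the similarity matrix:
# for a batch of N matched pairs, row i of `similarity` should score highest at
# column i, so the target labels are simply range(N). The same loss is applied
# to the transposed matrix (images as queries) and the two terms are averaged.

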
@dataclass
|
| 88 |
+
class TFCLIPOutput(ModelOutput):
|
| 89 |
+
"""
|
| 90 |
+
Args:
|
| 91 |
+
loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
|
| 92 |
+
Contrastive loss for image-text similarity.
|
| 93 |
+
logits_per_image:(`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
|
| 94 |
+
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
|
| 95 |
+
similarity scores.
|
| 96 |
+
logits_per_text:(`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
|
| 97 |
+
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
|
| 98 |
+
similarity scores.
|
| 99 |
+
text_embeds(`tf.Tensor` of shape `(batch_size, output_dim`):
|
| 100 |
+
The text embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPTextModel`].
|
| 101 |
+
image_embeds(`tf.Tensor` of shape `(batch_size, output_dim`):
|
| 102 |
+
The image embeddings obtained by applying the projection layer to the pooled output of
|
| 103 |
+
[`TFCLIPVisionModel`].
|
| 104 |
+
text_model_output([`~modeling_tf_utils.TFBaseModelOutputWithPooling`]):
|
| 105 |
+
The output of the [`TFCLIPTextModel`].
|
| 106 |
+
vision_model_output([`~modeling_tf_utils.TFBaseModelOutputWithPooling`]):
|
| 107 |
+
The output of the [`TFCLIPVisionModel`].
|
| 108 |
+
"""
|
| 109 |
+
|
| 110 |
+
loss: tf.Tensor | None = None
|
| 111 |
+
logits_per_image: tf.Tensor = None
|
| 112 |
+
logits_per_text: tf.Tensor = None
|
| 113 |
+
text_embeds: tf.Tensor = None
|
| 114 |
+
image_embeds: tf.Tensor = None
|
| 115 |
+
text_model_output: TFBaseModelOutputWithPooling = None
|
| 116 |
+
vision_model_output: TFBaseModelOutputWithPooling = None
|
| 117 |
+
|
| 118 |
+
def to_tuple(self) -> Tuple[Any]:
|
| 119 |
+
return tuple(
|
| 120 |
+
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
|
| 121 |
+
for k in self.keys()
|
| 122 |
+
)


class TFCLIPVisionEmbeddings(keras.layers.Layer):
    def __init__(self, config: CLIPVisionConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.config = config

        self.patch_embedding = keras.layers.Conv2D(
            filters=self.embed_dim,
            kernel_size=self.patch_size,
            strides=self.patch_size,
            padding="valid",
            data_format="channels_last",
            use_bias=False,
            kernel_initializer=get_initializer(self.config.initializer_range * self.config.initializer_factor),
            name="patch_embedding",
        )

    def build(self, input_shape: tf.TensorShape = None):
        factor = self.config.initializer_factor

        self.class_embedding = self.add_weight(
            shape=(self.embed_dim,),
            initializer=get_initializer(self.embed_dim**-0.5 * factor),
            trainable=True,
            name="class_embedding",
        )

        with tf.name_scope("position_embedding"):
            self.position_embedding = self.add_weight(
                shape=(self.num_positions, self.embed_dim),
                initializer=get_initializer(self.config.initializer_range * factor),
                trainable=True,
                name="embeddings",
            )

        if self.built:
            return
        self.built = True
        if getattr(self, "patch_embedding", None) is not None:
            with tf.name_scope(self.patch_embedding.name):
                self.patch_embedding.build([None, None, None, self.config.num_channels])

    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        """`pixel_values` is expected to be of NCHW format."""

        batch_size, num_channels, height, width = shape_list(pixel_values)

        # When running on CPU, `tf.nn.conv2d` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))

        patch_embeds = self.patch_embedding(pixel_values)

        # Change the 2D spatial dimensions to a single temporal dimension.
        # shape = (batch_size, num_patches, out_channels=embed_dim)
        patch_embeds = tf.reshape(tensor=patch_embeds, shape=(batch_size, self.num_patches, -1))

        # add the [CLS] token to the embedded patch tokens
        class_embeds = tf.broadcast_to(self.class_embedding, shape=(batch_size, 1, self.embed_dim))
        embeddings = tf.concat((class_embeds, patch_embeds), axis=1)

        embeddings = embeddings + self.position_embedding

        return embeddings
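
    # Shape walk-through (illustrative, using the ViT-B/32 defaults image_size=224,
    # patch_size=32): the convolution yields (224 // 32) ** 2 = 49 patch embeddings,
    # prepending the [CLS] token gives num_positions = 50, so the returned tensor has
    # shape (batch_size, 50, embed_dim).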


class TFCLIPTextEmbeddings(keras.layers.Layer):
    def __init__(self, config: CLIPTextConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size

        self.config = config

    def build(self, input_shape: tf.TensorShape = None):
        with tf.name_scope("token_embedding"):
            self.weight = self.add_weight(
                shape=(self.config.vocab_size, self.embed_dim),
                initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
                trainable=True,
                name="weight",
            )

        with tf.name_scope("position_embedding"):
            self.position_embedding = self.add_weight(
                shape=(self.config.max_position_embeddings, self.embed_dim),
                initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
                trainable=True,
                name="embeddings",
            )

        super().build(input_shape)

    def call(
        self,
        input_ids: tf.Tensor = None,
        position_ids: tf.Tensor = None,
        inputs_embeds: tf.Tensor = None,
    ) -> tf.Tensor:
        """
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)

        position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
        position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
        final_embeddings = inputs_embeds + position_embeds

        return final_embeddings


class TFCLIPAttention(keras.layers.Layer):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: CLIPConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = self.embed_dim // self.num_attention_heads
        if self.attention_head_size * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_attention_heads})."
            )

        factor = config.initializer_factor
        in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor
        out_proj_std = (self.embed_dim**-0.5) * factor

        self.sqrt_att_head_size = math.sqrt(self.attention_head_size)

        self.q_proj = keras.layers.Dense(
            units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj"
        )
        self.k_proj = keras.layers.Dense(
            units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj"
        )
        self.v_proj = keras.layers.Dense(
            units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj"
        )

        self.dropout = keras.layers.Dropout(rate=config.attention_dropout)

        self.out_proj = keras.layers.Dense(
            units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj"
        )

    # copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores
    def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
        tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))

        # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
        return tf.transpose(tensor, perm=[0, 2, 1, 3])
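
    # Concrete shapes (illustrative, assuming embed_dim=512 and num_attention_heads=8):
    # a (2, 77, 512) projection is reshaped to (2, 77, 8, 64) and transposed to
    # (2, 8, 77, 64), so each of the 8 heads attends over the 77 positions with its
    # own 64-dimensional slice.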

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        causal_attention_mask: tf.Tensor,
        output_attentions: bool,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """Input shape: Batch x Time x Channel"""

        batch_size = shape_list(hidden_states)[0]
        mixed_query_layer = self.q_proj(inputs=hidden_states)
        mixed_key_layer = self.k_proj(inputs=hidden_states)
        mixed_value_layer = self.v_proj(inputs=hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        # (batch size, num_heads, seq_len_q, seq_len_k)
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
        attention_scores = tf.divide(attention_scores, dk)

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function)
            attention_scores = tf.add(attention_scores, causal_attention_mask)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function)
            attention_scores = tf.add(attention_scores, attention_mask)

        # Normalize the attention scores to probabilities.
        _attention_probs = stable_softmax(logits=attention_scores, axis=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(inputs=_attention_probs, training=training)

        attention_output = tf.matmul(attention_probs, value_layer)
        attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])

        # (batch_size, seq_len_q, embed_dim)
        attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim))

        attention_output = self.out_proj(attention_output, training=training)
        # In TFBert, attention weights are returned after dropout.
        # However, in CLIP, they are returned before dropout.
        outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,)

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "q_proj", None) is not None:
            with tf.name_scope(self.q_proj.name):
                self.q_proj.build([None, None, self.embed_dim])
        if getattr(self, "k_proj", None) is not None:
            with tf.name_scope(self.k_proj.name):
                self.k_proj.build([None, None, self.embed_dim])
        if getattr(self, "v_proj", None) is not None:
            with tf.name_scope(self.v_proj.name):
                self.v_proj.build([None, None, self.embed_dim])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.embed_dim])


class TFCLIPMLP(keras.layers.Layer):
    def __init__(self, config: CLIPConfig, **kwargs):
        super().__init__(**kwargs)

        self.activation_fn = get_tf_activation(config.hidden_act)

        factor = config.initializer_factor
        in_proj_std = (config.hidden_size**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor
        fc_std = (2 * config.hidden_size) ** -0.5 * factor

        self.fc1 = keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name="fc1"
        )
        self.fc2 = keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name="fc2"
        )
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.fc1(inputs=hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(inputs=hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "fc1", None) is not None:
            with tf.name_scope(self.fc1.name):
                self.fc1.build([None, None, self.config.hidden_size])
        if getattr(self, "fc2", None) is not None:
            with tf.name_scope(self.fc2.name):
                self.fc2.build([None, None, self.config.intermediate_size])


class TFCLIPEncoderLayer(keras.layers.Layer):
    def __init__(self, config: CLIPConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size
        self.self_attn = TFCLIPAttention(config, name="self_attn")
        self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
        self.mlp = TFCLIPMLP(config, name="mlp")
        self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        causal_attention_mask: tf.Tensor,
        output_attentions: bool,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`tf.Tensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            causal_attention_mask (`tf.Tensor`): causal attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`):
                Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned
                tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(inputs=hidden_states)
        attention_outputs = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            training=training,
        )
        hidden_states = attention_outputs[0]
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(inputs=hidden_states)
        hidden_states = self.mlp(hidden_states=hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,) + attention_outputs[1:]  # add attentions if we output them

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self_attn", None) is not None:
            with tf.name_scope(self.self_attn.name):
                self.self_attn.build(None)
        if getattr(self, "layer_norm1", None) is not None:
            with tf.name_scope(self.layer_norm1.name):
                self.layer_norm1.build([None, None, self.embed_dim])
        if getattr(self, "mlp", None) is not None:
            with tf.name_scope(self.mlp.name):
                self.mlp.build(None)
        if getattr(self, "layer_norm2", None) is not None:
            with tf.name_scope(self.layer_norm2.name):
                self.layer_norm2.build([None, None, self.embed_dim])


class TFCLIPEncoder(keras.layers.Layer):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`TFCLIPEncoderLayer`].

    Args:
        config: CLIPConfig
    """

    def __init__(self, config: CLIPConfig, **kwargs):
        super().__init__(**kwargs)

        self.layers = [TFCLIPEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        causal_attention_mask: tf.Tensor,
        output_attentions: bool,
        output_hidden_states: bool,
        return_dict: bool,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                causal_attention_mask=causal_attention_mask,
                output_attentions=output_attentions,
                training=training,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)


class TFCLIPTextTransformer(keras.layers.Layer):
    def __init__(self, config: CLIPTextConfig, **kwargs):
        super().__init__(**kwargs)

        self.embeddings = TFCLIPTextEmbeddings(config, name="embeddings")
        self.encoder = TFCLIPEncoder(config, name="encoder")
        self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")

        # For `pooled_output` computation
        self.eos_token_id = config.eos_token_id
        self.embed_dim = config.hidden_size

    def call(
        self,
        input_ids: TFModelInputType,
        attention_mask: tf.Tensor,
        position_ids: tf.Tensor,
        output_attentions: bool,
        output_hidden_states: bool,
        return_dict: bool,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        input_shape = shape_list(input_ids)

        embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        batch_size, seq_length = input_shape
        # CLIP's text model uses causal mask, prepare it here.
        # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
        causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype)

        # check attention mask and invert
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        attention_mask = _expand_mask(attention_mask)

        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.final_layer_norm(inputs=sequence_output)

        if self.eos_token_id == 2:
            # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
            # A CLIP model with such an `eos_token_id` in the config can't work correctly with extra new tokens added.
            # ------------------------------------------------------------
            # text_embeds.shape = [batch_size, n_ctx, transformer.width]
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            pooled_output = tf.gather_nd(
                params=sequence_output,
                indices=tf.stack(
                    values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1
                ),
            )
        else:
            # The config gets the updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
            pooled_output = tf.gather_nd(
                params=sequence_output,
                indices=tf.stack(
                    values=(
                        tf.range(input_shape[0], dtype=tf.int64),
                        tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1),
                    ),
                    axis=1,
                ),
            )

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
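
    # Pooling sketch (illustrative): `tf.math.argmax(input_ids, axis=-1)` locates the
    # EOS token only because, in the original CLIP vocabulary, the end-of-text id is
    # the largest id in every sequence, e.g. for input_ids = [[49406, 320, 49407]]
    # (BOS, "a", EOS) it returns 2. The `else` branch instead takes the first position
    # equal to `eos_token_id`, which stays correct when larger token ids are added.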

    def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32):
        # It is possible with an unspecified sequence length for seq_length to be
        # a runtime value, which is unsupported by tf.constant. Per the TensorFlow
        # docs, tf.fill can handle runtime dynamic shapes:
        # https://www.tensorflow.org/api_docs/python/tf/fill
        diag = tf.cast(tf.fill((seq_length,), 0.0), dtype)

        # set an additive 2D attention mask with all places being masked
        to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype)

        # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked)
        # TIP: think of the 2D matrix as the space of (query_seq, key_seq)
        to_mask = tf.linalg.band_part(to_mask, 0, -1)
        # to_mask = tf.linalg.band_part(to_mask, -1, 0)
        to_mask = tf.linalg.set_diag(to_mask, diagonal=diag)

        return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length))
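
    # Worked example (illustrative): for seq_length=3 the 2D additive mask is
    #
    #     [[     0., -10000., -10000.],
    #      [     0.,      0., -10000.],
    #      [     0.,      0.,      0.]]
    #
    # so each query position can attend to itself and to earlier positions only.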

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "final_layer_norm", None) is not None:
            with tf.name_scope(self.final_layer_norm.name):
                self.final_layer_norm.build([None, None, self.embed_dim])


@keras_serializable
class TFCLIPTextMainLayer(keras.layers.Layer):
    config_class = CLIPTextConfig

    def __init__(self, config: CLIPTextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.text_model = TFCLIPTextTransformer(config, name="text_model")

    def get_input_embeddings(self) -> keras.layers.Layer:
        return self.text_model.embeddings

    def set_input_embeddings(self, value: tf.Variable):
        self.text_model.embeddings.weight = value
        self.text_model.embeddings.vocab_size = shape_list(value)[0]

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = shape_list(input_ids)

        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)

        text_model_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return text_model_outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "text_model", None) is not None:
            with tf.name_scope(self.text_model.name):
                self.text_model.build(None)


class TFCLIPVisionTransformer(keras.layers.Layer):
    def __init__(self, config: CLIPVisionConfig, **kwargs):
        super().__init__(**kwargs)

        self.embeddings = TFCLIPVisionEmbeddings(config, name="embeddings")
        # NOTE: the "pre_layrnorm" (sic) weight name is kept so that pretrained checkpoints keep loading.
        self.pre_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="pre_layrnorm")
        self.encoder = TFCLIPEncoder(config, name="encoder")
        self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm")
        self.embed_dim = config.hidden_size

    def call(
        self,
        pixel_values: TFModelInputType,
        output_attentions: bool,
        output_hidden_states: bool,
        return_dict: bool,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        embedding_output = self.embeddings(pixel_values=pixel_values)
        embedding_output = self.pre_layernorm(inputs=embedding_output)

        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            attention_mask=None,
            causal_attention_mask=None,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = encoder_outputs[0]
        pooled_output = sequence_output[:, 0, :]
        pooled_output = self.post_layernorm(inputs=pooled_output)

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "pre_layernorm", None) is not None:
            with tf.name_scope(self.pre_layernorm.name):
                self.pre_layernorm.build([None, None, self.embed_dim])
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "post_layernorm", None) is not None:
            with tf.name_scope(self.post_layernorm.name):
                self.post_layernorm.build([None, self.embed_dim])


@keras_serializable
class TFCLIPVisionMainLayer(keras.layers.Layer):
    config_class = CLIPVisionConfig

    def __init__(self, config: CLIPVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.vision_model = TFCLIPVisionTransformer(config, name="vision_model")

    def get_input_embeddings(self) -> keras.layers.Layer:
        return self.vision_model.embeddings

    @unpack_inputs
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        vision_model_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return vision_model_outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "vision_model", None) is not None:
            with tf.name_scope(self.vision_model.name):
                self.vision_model.build(None)


@keras_serializable
class TFCLIPMainLayer(keras.layers.Layer):
    config_class = CLIPConfig

    def __init__(self, config: CLIPConfig, **kwargs):
        super().__init__(**kwargs)

        if not isinstance(config.text_config, CLIPTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type CLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, CLIPVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        self.config = config

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim

        self.text_model = TFCLIPTextTransformer(text_config, name="text_model")
        self.vision_model = TFCLIPVisionTransformer(vision_config, name="vision_model")

        self.visual_projection = keras.layers.Dense(
            units=self.projection_dim,
            kernel_initializer=get_initializer(vision_config.hidden_size**-0.5 * self.config.initializer_factor),
            use_bias=False,
            name="visual_projection",
        )

        self.text_projection = keras.layers.Dense(
            units=self.projection_dim,
            kernel_initializer=get_initializer(text_config.hidden_size**-0.5 * self.config.initializer_factor),
            use_bias=False,
            name="text_projection",
        )
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

    def build(self, input_shape: tf.TensorShape = None):
        self.logit_scale = self.add_weight(
            shape=(1,),
            initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
            trainable=True,
            name="logit_scale",
        )

        if self.built:
            return
        self.built = True
        if getattr(self, "text_model", None) is not None:
            with tf.name_scope(self.text_model.name):
                self.text_model.build(None)
        if getattr(self, "vision_model", None) is not None:
            with tf.name_scope(self.vision_model.name):
                self.vision_model.build(None)
        if getattr(self, "visual_projection", None) is not None:
            with tf.name_scope(self.visual_projection.name):
                self.visual_projection.build([None, None, self.vision_embed_dim])
        if getattr(self, "text_projection", None) is not None:
            with tf.name_scope(self.text_projection.name):
                self.text_projection.build([None, None, self.text_embed_dim])

    @unpack_inputs
    def get_text_features(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> tf.Tensor:
        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = shape_list(input_ids)

        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        pooled_output = text_outputs[1]
        text_features = self.text_projection(inputs=pooled_output)

        return text_features

    @unpack_inputs
    def get_image_features(
        self,
        pixel_values: TFModelInputType | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> tf.Tensor:
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        pooled_output = vision_outputs[1]  # pooled_output
        image_features = self.visual_projection(inputs=pooled_output)

        return image_features

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        pixel_values: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFCLIPOutput, Tuple[tf.Tensor]]:
        if input_ids is None:
            raise ValueError("You have to specify input_ids")
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        input_shape = shape_list(input_ids)

        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(inputs=image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(inputs=text_embeds)

        # normalized features
        image_embeds = image_embeds / tf.norm(tensor=image_embeds, ord="euclidean", axis=-1, keepdims=True)
        text_embeds = text_embeds / tf.norm(tensor=text_embeds, ord="euclidean", axis=-1, keepdims=True)

        # cosine similarity as logits
        logit_scale = tf.math.exp(self.logit_scale)
        logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
        logits_per_image = tf.transpose(logits_per_text)

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)
            loss = tf.reshape(loss, (1,))

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return (loss,) + output if loss is not None else output

        return TFCLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
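
    # Temperature note: `logit_scale` is stored in log space, so with the default
    # `logit_scale_init_value` of 2.6592 the multiplier applied to the cosine
    # similarities is exp(2.6592) ~= 14.28, i.e. the 1 / 0.07 temperature from the
    # CLIP paper.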


class TFCLIPPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CLIPConfig
    base_model_prefix = "clip"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    _keys_to_ignore_on_load_unexpected = [r"position_ids"]


CLIP_START_DOCSTRING = r"""

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
      `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Args:
        config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

CLIP_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""

CLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""

CLIP_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`CLIPImageProcessor.__call__`] for details.
        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""


class TFCLIPTextModel(TFCLIPPreTrainedModel):
    config_class = CLIPTextConfig

    def __init__(self, config: CLIPTextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.clip = TFCLIPTextMainLayer(config, name="clip")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPTextConfig)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TFCLIPTextModel

        >>> model = TFCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""

        outputs = self.clip(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "clip", None) is not None:
            with tf.name_scope(self.clip.name):
                self.clip.build(None)


class TFCLIPVisionModel(TFCLIPPreTrainedModel):
    config_class = CLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPVisionConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.clip = TFCLIPVisionMainLayer(config, name="clip")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, TFCLIPVisionModel

        >>> model = TFCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="tf")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""

        outputs = self.clip(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "clip", None) is not None:
            with tf.name_scope(self.clip.name):
                self.clip.build(None)


@add_start_docstrings(CLIP_START_DOCSTRING)
class TFCLIPModel(TFCLIPPreTrainedModel):
    config_class = CLIPConfig

    def __init__(self, config: CLIPConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.clip = TFCLIPMainLayer(config, name="clip")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def get_text_features(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> tf.Tensor:
        r"""
        Returns:
            text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
            the projection layer to the pooled output of [`TFCLIPTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TFCLIPModel

        >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
        >>> text_features = model.get_text_features(**inputs)
        ```"""

        text_features = self.clip.get_text_features(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return text_features

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: TFModelInputType | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> tf.Tensor:
        r"""
        Returns:
            image_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying
            the projection layer to the pooled output of [`TFCLIPVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, TFCLIPModel

        >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1375 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1376 |
+
|
| 1377 |
+
>>> inputs = processor(images=image, return_tensors="tf")
|
| 1378 |
+
|
| 1379 |
+
>>> image_features = model.get_image_features(**inputs)
|
| 1380 |
+
```"""
|
| 1381 |
+
|
| 1382 |
+
image_features = self.clip.get_image_features(
|
| 1383 |
+
pixel_values=pixel_values,
|
| 1384 |
+
output_attentions=output_attentions,
|
| 1385 |
+
output_hidden_states=output_hidden_states,
|
| 1386 |
+
return_dict=return_dict,
|
| 1387 |
+
)
|
| 1388 |
+
|
| 1389 |
+
return image_features
|
| 1390 |
+
|
| 1391 |
+
@unpack_inputs
|
| 1392 |
+
@add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 1393 |
+
@replace_return_docstrings(output_type=TFCLIPOutput, config_class=CLIPConfig)
|
| 1394 |
+
def call(
|
| 1395 |
+
self,
|
| 1396 |
+
input_ids: TFModelInputType | None = None,
|
| 1397 |
+
pixel_values: TFModelInputType | None = None,
|
| 1398 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 1399 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 1400 |
+
return_loss: Optional[bool] = None,
|
| 1401 |
+
output_attentions: Optional[bool] = None,
|
| 1402 |
+
output_hidden_states: Optional[bool] = None,
|
| 1403 |
+
return_dict: Optional[bool] = None,
|
| 1404 |
+
training: bool = False,
|
| 1405 |
+
) -> Union[TFCLIPOutput, Tuple[tf.Tensor]]:
|
| 1406 |
+
r"""
|
| 1407 |
+
Returns:
|
| 1408 |
+
|
| 1409 |
+
Examples:
|
| 1410 |
+
|
| 1411 |
+
```python
|
| 1412 |
+
>>> import tensorflow as tf
|
| 1413 |
+
>>> from PIL import Image
|
| 1414 |
+
>>> import requests
|
| 1415 |
+
>>> from transformers import AutoProcessor, TFCLIPModel
|
| 1416 |
+
|
| 1417 |
+
>>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
| 1418 |
+
>>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
| 1419 |
+
|
| 1420 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1421 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1422 |
+
|
| 1423 |
+
>>> inputs = processor(
|
| 1424 |
+
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
|
| 1425 |
+
... )
|
| 1426 |
+
|
| 1427 |
+
>>> outputs = model(**inputs)
|
| 1428 |
+
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
|
| 1429 |
+
>>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
|
| 1430 |
+
```"""
|
| 1431 |
+
|
| 1432 |
+
outputs = self.clip(
|
| 1433 |
+
input_ids=input_ids,
|
| 1434 |
+
pixel_values=pixel_values,
|
| 1435 |
+
attention_mask=attention_mask,
|
| 1436 |
+
position_ids=position_ids,
|
| 1437 |
+
return_loss=return_loss,
|
| 1438 |
+
output_attentions=output_attentions,
|
| 1439 |
+
output_hidden_states=output_hidden_states,
|
| 1440 |
+
return_dict=return_dict,
|
| 1441 |
+
)
|
| 1442 |
+
|
| 1443 |
+
return outputs
|
| 1444 |
+
|
| 1445 |
+
def serving_output(self, output: TFCLIPOutput) -> TFCLIPOutput:
|
| 1446 |
+
# TODO: As is this currently fails with saved_model=True, because
|
| 1447 |
+
# TensorFlow cannot trace through nested dataclasses. Reference:
|
| 1448 |
+
# https://github.com/huggingface/transformers/pull/16886
|
| 1449 |
+
return output
|
| 1450 |
+
|
| 1451 |
+
def build(self, input_shape=None):
|
| 1452 |
+
if self.built:
|
| 1453 |
+
return
|
| 1454 |
+
self.built = True
|
| 1455 |
+
if getattr(self, "clip", None) is not None:
|
| 1456 |
+
with tf.name_scope(self.clip.name):
|
| 1457 |
+
self.clip.build(None)
|
| 1458 |
+
|
| 1459 |
+
|
| 1460 |
+
__all__ = ["TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel"]
|
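The projected features returned by `get_text_features` and `get_image_features` above can be compared directly once L2-normalized. A minimal sketch of that use (the normalization step and the cosine-similarity matmul are illustrative, not part of the file itself):

import tensorflow as tf
from transformers import AutoTokenizer, TFCLIPModel

model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

# Encode two candidate captions and L2-normalize the projected embeddings.
inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
text_features = model.get_text_features(**inputs)
text_features = text_features / tf.norm(text_features, axis=-1, keepdims=True)

# Cosine similarity between the two captions; image features from
# get_image_features would be compared against text features the same way.
similarity = tf.matmul(text_features, text_features, transpose_b=True)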
janus/lib/python3.10/site-packages/transformers/models/clip/processing_clip.py
ADDED
@@ -0,0 +1,156 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for CLIP
"""

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    r"""
    Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor.

    [`CLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`CLIPTokenizerFast`]. See the
    [`~CLIPProcessor.__call__`] and [`~CLIPProcessor.decode`] for more information.

    Args:
        image_processor ([`CLIPImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizerFast`], *optional*):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
        and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.

            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        tokenizer_kwargs, image_processor_kwargs = {}, {}
        if kwargs:
            tokenizer_kwargs = {k: v for k, v in kwargs.items() if k not in self.image_processor._valid_processor_keys}
            image_processor_kwargs = {
                k: v for k, v in kwargs.items() if k in self.image_processor._valid_processor_keys
            }

        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **tokenizer_kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **image_processor_kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor


__all__ = ["CLIPProcessor"]
janus/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py
ADDED
@@ -0,0 +1,519 @@
# coding=utf-8
# Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for CLIP."""

import json
import os
import unicodedata
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
    characters the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def whitespace_clean(text):
    text = re.sub(r"\s+", " ", text)
    text = text.strip()
    return text


# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*):
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class CLIPTokenizer(PreTrainedTokenizer):
    """
    Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",  # hack to enable padding
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        try:
            import ftfy

            self.fix_text = ftfy.fix_text
        except ImportError:
            logger.info("ftfy or spacy is not installed, using custom BasicTokenizer instead of ftfy.")
            self.nlp = BasicTokenizer(strip_accents=False, do_split_on_punc=False)
            self.fix_text = None

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"}

        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE,
        )

        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A CLIP sequence has the following format:

        - single sequence: `<|startoftext|> X <|endoftext|>`

        Pairs of sequences are not the expected use case, but they will be handled without a separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]

        if token_ids_1 is None:
            return bos_token + token_ids_0 + eos_token
        return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of
        zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]

        if token_ids_1 is None:
            return len(bos_token + token_ids_0 + eos_token) * [0]
        return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0]

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        if self.fix_text is None:
            text = " ".join(self.nlp.tokenize(text))
        else:
            text = whitespace_clean(self.fix_text(text)).lower()

        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        byte_array = bytearray([self.byte_decoder[c] for c in text])
        text = byte_array.decode("utf-8", errors=self.errors).replace("</w>", " ").strip()
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!".format(merge_file)
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file


__all__ = ["CLIPTokenizer"]
janus/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip_fast.py
ADDED
@@ -0,0 +1,164 @@
# coding=utf-8
# Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""

from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_clip import CLIPTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}


class CLIPTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" CLIP tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
    Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            Path to the vocabulary file.
        merges_file (`str`, *optional*):
            Path to the merges file.
        tokenizer_file (`str`, *optional*):
            The path to a tokenizer file to use instead of the vocab file.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CLIPTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",  # hack to enable padding
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        if not isinstance(self.backend_tokenizer.pre_tokenizer, pre_tokenizers.Sequence):
            raise ValueError(
                "The `backend_tokenizer` provided does not match the expected format. The CLIP tokenizer has been"
                " heavily modified from transformers version 4.17.0. You need to convert the tokenizer you are using"
                " to be compatible with this version. The easiest way to do so is"
                ' `CLIPTokenizerFast.from_pretrained("path_to_local_folder_or_hub_repo", from_slow=True)`. If you want'
                " to use your existing tokenizer, you will have to revert to a version prior to 4.17.0 of"
                " transformers."
            )
        self._wrap_decode_method_backend_tokenizer()

    # Very ugly hack to enable padding to have a correct decoding see https://github.com/huggingface/tokenizers/issues/872
    def _wrap_decode_method_backend_tokenizer(self):
        orig_decode_method = self.backend_tokenizer.decode

        ## define this as a local variable to avoid circular reference
        ## See: https://github.com/huggingface/transformers/issues/30930
        end_of_word_suffix = self.backend_tokenizer.model.end_of_word_suffix

        def new_decode_method(*args, **kwargs):
            text = orig_decode_method(*args, **kwargs)
            text = text.replace(end_of_word_suffix, " ").strip()
            return text

        self.backend_tokenizer.decode = new_decode_method

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A CLIP sequence has the following format:

        - single sequence: `<|startoftext|> X <|endoftext|>`

        Pairs of sequences are not the expected use case, but they will be handled without a separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]

        if token_ids_1 is None:
            return bos_token + token_ids_0 + eos_token
        return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of
        zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]

        if token_ids_1 is None:
            return len(bos_token + token_ids_0 + eos_token) * [0]
        return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


__all__ = ["CLIPTokenizerFast"]
janus/lib/python3.10/site-packages/transformers/models/code_llama/tokenization_code_llama_fast.py
ADDED
@@ -0,0 +1,381 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import normalizers, processors

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
from ...utils.versions import require_version


require_version("tokenizers>=0.13.3")

if is_sentencepiece_available():
    from .tokenization_code_llama import CodeLlamaTokenizer
else:
    CodeLlamaTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}

SPIECE_UNDERLINE = "▁"


B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

# fmt: off
DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
correct. If you don't know the answer to a question, please don't share false information."""
# fmt: on


class CodeLlamaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.

    This uses notably ByteFallback and no normalization.

    ```python
    >>> from transformers import CodeLlamaTokenizerFast

    >>> tokenizer = CodeLlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
    >>> tokenizer.encode("Hello this is a test")
    [1, 15043, 445, 338, 263, 1243]
    ```

    If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
    call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
    values of the first token and final token of an encoded sequence will not be correct). For more details, check out
    the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.


    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods. The default configuration matches that of
    [meta-llama/CodeLlama-7b-Instruct-hf](https://huggingface.co/meta-llama/CodeLlama-7b-Instruct-hf/blob/main/tokenizer_config.json)
    which supports prompt infilling.

    Args:
        vocab_file (`str`, *optional*):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        tokenizer_file (`str`, *optional*):
            [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
            contains everything needed to load the tokenizer.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether to clean up spaces after decoding; cleanup consists in removing potential artifacts like extra
            spaces.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        prefix_token (`str`, *optional*, defaults to `"▁<PRE>"`):
            Prefix token used for infilling.
        middle_token (`str`, *optional*, defaults to `"▁<MID>"`):
            Middle token used for infilling.
        suffix_token (`str`, *optional*, defaults to `"▁<SUF>"`):
            Suffix token used for infilling.
        eot_token (`str`, *optional*, defaults to `"▁<EOT>"`):
            End of text token used for infilling.
        fill_token (`str`, *optional*, defaults to `"<FILL_ME>"`):
            The token used to split the input between the prefix and suffix.
        additional_special_tokens (`List[str]`, *optional*):
            Additional special tokens used by the tokenizer.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether to add a beginning of sequence token at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether to add an end of sequence token at the end of sequences.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Llama should be used.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = CodeLlamaTokenizer
    padding_side = "left"
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        clean_up_tokenization_spaces=False,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        prefix_token="▁<PRE>",
        middle_token="▁<MID>",
        suffix_token="▁<SUF>",
        eot_token="▁<EOT>",
        fill_token="<FILL_ME>",
        additional_special_tokens=None,
        add_bos_token=True,
        add_eos_token=False,
        use_default_system_prompt=False,
        **kwargs,
    ):
        # mark tokens special to skip them
|
| 139 |
+
additional_special_tokens = additional_special_tokens or []
|
| 140 |
+
for token in [prefix_token, middle_token, suffix_token, eot_token]:
|
| 141 |
+
additional_special_tokens += [token] if token is not None else []
|
| 142 |
+
self.use_default_system_prompt = use_default_system_prompt
|
| 143 |
+
|
| 144 |
+
super().__init__(
|
| 145 |
+
vocab_file=vocab_file,
|
| 146 |
+
tokenizer_file=tokenizer_file,
|
| 147 |
+
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
|
| 148 |
+
additional_special_tokens=additional_special_tokens,
|
| 149 |
+
unk_token=unk_token,
|
| 150 |
+
bos_token=bos_token,
|
| 151 |
+
eos_token=eos_token,
|
| 152 |
+
add_bos_token=add_bos_token,
|
| 153 |
+
add_eos_token=add_eos_token,
|
| 154 |
+
prefix_token=prefix_token,
|
| 155 |
+
middle_token=middle_token,
|
| 156 |
+
suffix_token=suffix_token,
|
| 157 |
+
eot_token=eot_token,
|
| 158 |
+
fill_token=fill_token,
|
| 159 |
+
use_default_system_prompt=use_default_system_prompt,
|
| 160 |
+
**kwargs,
|
| 161 |
+
)
|
| 162 |
+
self._add_bos_token = add_bos_token
|
| 163 |
+
self._add_eos_token = add_eos_token
|
| 164 |
+
self.update_post_processor()
|
| 165 |
+
|
| 166 |
+
self.vocab_file = vocab_file
|
| 167 |
+
|
| 168 |
+
self._prefix_token = prefix_token
|
| 169 |
+
self._middle_token = middle_token
|
| 170 |
+
self._suffix_token = suffix_token
|
| 171 |
+
self._eot_token = eot_token
|
| 172 |
+
self.fill_token = fill_token
|
| 173 |
+
|
| 174 |
+
@property
|
| 175 |
+
def can_save_slow_tokenizer(self) -> bool:
|
| 176 |
+
return os.path.isfile(self.vocab_file) if self.vocab_file else False
|
| 177 |
+
|
| 178 |
+
# Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor
|
| 179 |
+
def update_post_processor(self):
|
| 180 |
+
"""
|
| 181 |
+
Updates the underlying post processor with the current `bos_token` and `eos_token`.
|
| 182 |
+
"""
|
| 183 |
+
bos = self.bos_token
|
| 184 |
+
bos_token_id = self.bos_token_id
|
| 185 |
+
if bos is None and self.add_bos_token:
|
| 186 |
+
raise ValueError("add_bos_token = True but bos_token = None")
|
| 187 |
+
|
| 188 |
+
eos = self.eos_token
|
| 189 |
+
eos_token_id = self.eos_token_id
|
| 190 |
+
if eos is None and self.add_eos_token:
|
| 191 |
+
raise ValueError("add_eos_token = True but eos_token = None")
|
| 192 |
+
|
| 193 |
+
single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
|
| 194 |
+
pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
|
| 195 |
+
|
| 196 |
+
special_tokens = []
|
| 197 |
+
if self.add_bos_token:
|
| 198 |
+
special_tokens.append((bos, bos_token_id))
|
| 199 |
+
if self.add_eos_token:
|
| 200 |
+
special_tokens.append((eos, eos_token_id))
|
| 201 |
+
self._tokenizer.post_processor = processors.TemplateProcessing(
|
| 202 |
+
single=single, pair=pair, special_tokens=special_tokens
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
@property
|
| 206 |
+
def prefix_token(self):
|
| 207 |
+
return self._prefix_token
|
| 208 |
+
|
| 209 |
+
@property
|
| 210 |
+
def prefix_id(self):
|
| 211 |
+
if self._prefix_token is None:
|
| 212 |
+
return None
|
| 213 |
+
return self.convert_tokens_to_ids(self.prefix_token)
|
| 214 |
+
|
| 215 |
+
@property
|
| 216 |
+
def middle_token(self):
|
| 217 |
+
return self._middle_token
|
| 218 |
+
|
| 219 |
+
@property
|
| 220 |
+
def middle_id(self):
|
| 221 |
+
if self._middle_token is None:
|
| 222 |
+
return None
|
| 223 |
+
return self.convert_tokens_to_ids(self.middle_token)
|
| 224 |
+
|
| 225 |
+
@property
|
| 226 |
+
def suffix_token(self):
|
| 227 |
+
return self._suffix_token
|
| 228 |
+
|
| 229 |
+
@property
|
| 230 |
+
def suffix_id(self):
|
| 231 |
+
if self._suffix_token is None:
|
| 232 |
+
return None
|
| 233 |
+
return self.convert_tokens_to_ids(self.suffix_token)
|
| 234 |
+
|
| 235 |
+
@property
|
| 236 |
+
def eot_id(self):
|
| 237 |
+
if self._eot_token is None:
|
| 238 |
+
return None
|
| 239 |
+
return self.convert_tokens_to_ids(self.eot_token)
|
| 240 |
+
|
| 241 |
+
@property
|
| 242 |
+
def eot_token(self):
|
| 243 |
+
return self._eot_token
|
| 244 |
+
|
| 245 |
+
@property
|
| 246 |
+
def add_eos_token(self):
|
| 247 |
+
return self._add_eos_token
|
| 248 |
+
|
| 249 |
+
@property
|
| 250 |
+
def add_bos_token(self):
|
| 251 |
+
return self._add_bos_token
|
| 252 |
+
|
| 253 |
+
@add_eos_token.setter
|
| 254 |
+
def add_eos_token(self, value):
|
| 255 |
+
self._add_eos_token = value
|
| 256 |
+
self.update_post_processor()
|
| 257 |
+
|
| 258 |
+
@add_bos_token.setter
|
| 259 |
+
def add_bos_token(self, value):
|
| 260 |
+
self._add_bos_token = value
|
| 261 |
+
self.update_post_processor()
|
| 262 |
+
|
| 263 |
+
def set_infilling_processor(self, reset, suffix_first=False, add_special_tokens=True):
|
| 264 |
+
"""
|
| 265 |
+
Updates the normalizer to make sure the prompt format for `infilling` is respected. The infilling format is the
|
| 266 |
+
following: if suffix_first
|
| 267 |
+
" <PRE> <SUF>{suf} <MID> {pre}"
|
| 268 |
+
else:
|
| 269 |
+
" <PRE> {pre} <SUF>{suf} <MID>"
|
| 270 |
+
|
| 271 |
+
If `reset` is set to `True`, the `normalizer` and `post_processor` are reset to their "normal" behaviour, which
|
| 272 |
+
is to add a prefix space for the normalizer, and add a `bos_token` to the input text for the `post_processor`.
|
| 273 |
+
"""
|
| 274 |
+
if reset:
|
| 275 |
+
self._tokenizer.normalizer = normalizers.Sequence(
|
| 276 |
+
[
|
| 277 |
+
normalizers.Prepend(prepend="▁"),
|
| 278 |
+
normalizers.Replace(pattern=" ", content="▁"),
|
| 279 |
+
]
|
| 280 |
+
)
|
| 281 |
+
self.update_post_processor()
|
| 282 |
+
return
|
| 283 |
+
|
| 284 |
+
self._tokenizer.normalizer = normalizers.Replace(pattern=" ", content="▁")
|
| 285 |
+
pair = [self.bos_token] if self.add_bos_token and add_special_tokens else []
|
| 286 |
+
special_tokens = [(self.bos_token, self.bos_token_id)] if self.add_bos_token and add_special_tokens else []
|
| 287 |
+
if suffix_first:
|
| 288 |
+
# format as " <PRE> <SUF>{suf} <MID> {pre}"
|
| 289 |
+
pair += [self.prefix_token, self.suffix_token, "$B", self.middle_token, "$A"]
|
| 290 |
+
special_tokens += [
|
| 291 |
+
(self.prefix_token, self.prefix_id),
|
| 292 |
+
(self.suffix_token, self.suffix_id),
|
| 293 |
+
(self.middle_token, self.middle_id),
|
| 294 |
+
]
|
| 295 |
+
else:
|
| 296 |
+
# format as " <PRE> {pre} <SUF>{suf} <MID>"
|
| 297 |
+
pair += [self.prefix_token, "$A", self.suffix_token, "$B", self.middle_token]
|
| 298 |
+
special_tokens += [
|
| 299 |
+
(self.prefix_token, self.prefix_id),
|
| 300 |
+
(self.suffix_token, self.suffix_id),
|
| 301 |
+
(self.middle_token, self.middle_id),
|
| 302 |
+
]
|
| 303 |
+
|
| 304 |
+
if self.add_eos_token and add_special_tokens:
|
| 305 |
+
pair += [self.eos_token]
|
| 306 |
+
special_tokens += [(self.eos_token, self.eos_token_id)]
|
| 307 |
+
self._tokenizer.post_processor = processors.TemplateProcessing(
|
| 308 |
+
single="$A", pair=pair, special_tokens=special_tokens
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
def encode_plus(self, text, text_pair=None, suffix_first=False, add_special_tokens=True, **kwargs):
|
| 312 |
+
# hack to make sure the input is pre-process but outside rust
|
| 313 |
+
text_pair = kwargs.pop("suffix", text_pair)
|
| 314 |
+
if self.fill_token is not None and self.fill_token in text and text_pair is None:
|
| 315 |
+
text, text_pair = text.split(self.fill_token)
|
| 316 |
+
|
| 317 |
+
if text_pair is None or len(text_pair) < 1:
|
| 318 |
+
return super().encode_plus(text, text_pair, add_special_tokens=add_special_tokens, **kwargs)
|
| 319 |
+
|
| 320 |
+
if None in (self.prefix_id, self.middle_id, self.suffix_id):
|
| 321 |
+
raise ValueError(
|
| 322 |
+
"Then input includes a `prefix` and a `suffix` used for the infilling task,"
|
| 323 |
+
" the `prefix_id, middle_id, suffix_id` must all be initialized. Current"
|
| 324 |
+
f" values : {self.prefix_id, self.middle_id, self.suffix_id}"
|
| 325 |
+
)
|
| 326 |
+
|
| 327 |
+
self.set_infilling_processor(False, suffix_first=suffix_first, add_special_tokens=add_special_tokens)
|
| 328 |
+
tokens = super().encode_plus(" " + text, text_pair=text_pair, add_special_tokens=True, **kwargs)
|
| 329 |
+
self.set_infilling_processor(True)
|
| 330 |
+
return tokens
|
| 331 |
+
|
| 332 |
+
# Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.save_vocabulary
|
| 333 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
| 334 |
+
if not self.can_save_slow_tokenizer:
|
| 335 |
+
raise ValueError(
|
| 336 |
+
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
|
| 337 |
+
"tokenizer."
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
if not os.path.isdir(save_directory):
|
| 341 |
+
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
| 342 |
+
return
|
| 343 |
+
out_vocab_file = os.path.join(
|
| 344 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
| 345 |
+
)
|
| 346 |
+
|
| 347 |
+
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
|
| 348 |
+
copyfile(self.vocab_file, out_vocab_file)
|
| 349 |
+
|
| 350 |
+
return (out_vocab_file,)
|
| 351 |
+
|
| 352 |
+
def build_inputs_with_special_tokens(
|
| 353 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
| 354 |
+
) -> List[int]:
|
| 355 |
+
"""
|
| 356 |
+
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
| 357 |
+
adding special tokens. The special tokens depend on calling set_lang.
|
| 358 |
+
|
| 359 |
+
An NLLB sequence has the following format, where `X` represents the sequence:
|
| 360 |
+
|
| 361 |
+
- `input_ids` (for encoder) `X [eos, src_lang_code]`
|
| 362 |
+
- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
|
| 363 |
+
|
| 364 |
+
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
|
| 365 |
+
separator.
|
| 366 |
+
|
| 367 |
+
Args:
|
| 368 |
+
token_ids_0 (`List[int]`):
|
| 369 |
+
List of IDs to which the special tokens will be added.
|
| 370 |
+
token_ids_1 (`List[int]`, *optional*):
|
| 371 |
+
Optional second list of IDs for sequence pairs.
|
| 372 |
+
|
| 373 |
+
Returns:
|
| 374 |
+
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
| 375 |
+
"""
|
| 376 |
+
if token_ids_1 is None:
|
| 377 |
+
return self.bos_token_id + token_ids_0 + self.eos_token_id
|
| 378 |
+
return self.bos_token_id + token_ids_0 + token_ids_1 + self.eos_token_id
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
__all__ = ["CodeLlamaTokenizerFast"]
|
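To make the infilling contract above concrete, a minimal usage sketch follows. It relies only on the `fill_token` splitting and the `suffix` keyword visible in `encode_plus` above; the checkpoint name is an illustrative assumption, not something this file prescribes.

```python
# Minimal sketch of the infilling flow implemented above.
# Assumption: "codellama/CodeLlama-7b-hf" is used purely as an example checkpoint.
from transformers import CodeLlamaTokenizerFast

tokenizer = CodeLlamaTokenizerFast.from_pretrained("codellama/CodeLlama-7b-hf")

# Option 1: embed the fill token in the text; encode_plus() splits on it internally.
ids = tokenizer.encode("def add(a, b):\n    <FILL_ME>\n")

# Option 2: pass the suffix explicitly; this routes through set_infilling_processor()
# and produces the " <PRE> {pre} <SUF>{suf} <MID>" layout (suffix_first=False).
enc = tokenizer.encode_plus("def add(a, b):\n    ", suffix="\n", suffix_first=False)
print(enc["input_ids"])
```

Either path ends with `set_infilling_processor(True)`, so the normalizer and post-processor are restored for ordinary encoding afterwards.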
janus/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc
ADDED
Binary file (39.9 kB)
janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__init__.py
ADDED
@@ -0,0 +1,30 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_deberta_v2 import *
    from .modeling_deberta_v2 import *
    from .modeling_tf_deberta_v2 import *
    from .tokenization_deberta_v2 import *
    from .tokenization_deberta_v2_fast import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
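A quick sketch of what this lazy `__init__` buys, assuming a standard transformers install: importing the package itself stays cheap, and `_LazyModule` only loads a submodule the first time one of its attributes is accessed.

```python
# Sketch: attribute access on the lazy module triggers the real submodule import.
import transformers.models.deberta_v2 as deberta_v2

config_cls = deberta_v2.DebertaV2Config  # configuration_deberta_v2 is imported here
print(config_cls.model_type)  # "deberta-v2"
```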
janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (658 Bytes)

janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/configuration_deberta_v2.cpython-310.pyc
ADDED
Binary file (8.13 kB)

janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_deberta_v2.cpython-310.pyc
ADDED
Binary file (42.6 kB)

janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_tf_deberta_v2.cpython-310.pyc
ADDED
Binary file (56.3 kB)

janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2.cpython-310.pyc
ADDED
Binary file (19.3 kB)

janus/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2_fast.cpython-310.pyc
ADDED
Binary file (8.9 kB)
janus/lib/python3.10/site-packages/transformers/models/deberta_v2/configuration_deberta_v2.py
ADDED
@@ -0,0 +1,198 @@
# coding=utf-8
# Copyright 2020, Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeBERTa-v2 model configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)


class DebertaV2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DebertaV2Model`]. It is used to instantiate a
    DeBERTa-v2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the DeBERTa
    [microsoft/deberta-v2-xlarge](https://huggingface.co/microsoft/deberta-v2-xlarge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 128100):
            Vocabulary size of the DeBERTa-v2 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`DebertaV2Model`].
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 24):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"` are
            supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 0):
            The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-7):
            The epsilon used by the layer normalization layers.
        relative_attention (`bool`, *optional*, defaults to `False`):
            Whether to use relative position encoding.
        max_relative_positions (`int`, *optional*, defaults to -1):
            The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
            as `max_position_embeddings`.
        pad_token_id (`int`, *optional*, defaults to 0):
            The value used to pad input_ids.
        position_biased_input (`bool`, *optional*, defaults to `True`):
            Whether to add absolute position embeddings to the content embeddings.
        pos_att_type (`List[str]`, *optional*):
            The type of relative position attention; it can be any combination of `["p2c", "c2p"]`, e.g. `["p2c"]`,
            `["c2p"]` or `["p2c", "c2p"]`.
        pooler_dropout (`float`, *optional*, defaults to 0):
            The dropout ratio for the pooler.
        pooler_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function used by the pooler.
        legacy (`bool`, *optional*, defaults to `True`):
            Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly
            for mask infilling tasks.

    Example:

    ```python
    >>> from transformers import DebertaV2Config, DebertaV2Model

    >>> # Initializing a DeBERTa-v2 microsoft/deberta-v2-xlarge style configuration
    >>> configuration = DebertaV2Config()

    >>> # Initializing a model (with random weights) from the microsoft/deberta-v2-xlarge style configuration
    >>> model = DebertaV2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        legacy=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
        self.legacy = legacy


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs


__all__ = ["DebertaV2Config", "DebertaV2OnnxConfig"]
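As a sketch of the `DebertaV2OnnxConfig.inputs` behaviour defined above (assuming the usual `OnnxConfig(config)` constructor), note how `token_type_ids` only appears in the ONNX input signature when `type_vocab_size > 0`:

```python
# Sketch: the ONNX input signature depends on type_vocab_size.
from transformers import DebertaV2Config
from transformers.models.deberta_v2.configuration_deberta_v2 import DebertaV2OnnxConfig

config = DebertaV2Config()  # type_vocab_size defaults to 0
print(list(DebertaV2OnnxConfig(config).inputs))
# ['input_ids', 'attention_mask']

config_with_types = DebertaV2Config(type_vocab_size=2)
print(list(DebertaV2OnnxConfig(config_with_types).inputs))
# ['input_ids', 'attention_mask', 'token_type_ids']
```

This mirrors `generate_dummy_inputs`, which drops `token_type_ids` from the dummy batch under the same condition.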
janus/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py
ADDED
|
@@ -0,0 +1,1519 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2020 Microsoft and the Hugging Face Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""PyTorch DeBERTa-v2 model."""
|
| 16 |
+
|
| 17 |
+
from collections.abc import Sequence
|
| 18 |
+
from typing import Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.utils.checkpoint
|
| 22 |
+
from torch import nn
|
| 23 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
|
| 24 |
+
|
| 25 |
+
from ...activations import ACT2FN
|
| 26 |
+
from ...modeling_outputs import (
|
| 27 |
+
BaseModelOutput,
|
| 28 |
+
MaskedLMOutput,
|
| 29 |
+
MultipleChoiceModelOutput,
|
| 30 |
+
QuestionAnsweringModelOutput,
|
| 31 |
+
SequenceClassifierOutput,
|
| 32 |
+
TokenClassifierOutput,
|
| 33 |
+
)
|
| 34 |
+
from ...modeling_utils import PreTrainedModel
|
| 35 |
+
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
|
| 36 |
+
from .configuration_deberta_v2 import DebertaV2Config
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
logger = logging.get_logger(__name__)
|
| 40 |
+
|
| 41 |
+
_CONFIG_FOR_DOC = "DebertaV2Config"
|
| 42 |
+
_CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge"
|
| 43 |
+
_QA_TARGET_START_INDEX = 2
|
| 44 |
+
_QA_TARGET_END_INDEX = 9
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm
|
| 48 |
+
class DebertaV2SelfOutput(nn.Module):
|
| 49 |
+
def __init__(self, config):
|
| 50 |
+
super().__init__()
|
| 51 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 52 |
+
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
|
| 53 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 54 |
+
|
| 55 |
+
def forward(self, hidden_states, input_tensor):
|
| 56 |
+
hidden_states = self.dense(hidden_states)
|
| 57 |
+
hidden_states = self.dropout(hidden_states)
|
| 58 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
| 59 |
+
return hidden_states
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@torch.jit.script
|
| 63 |
+
def make_log_bucket_position(relative_pos, bucket_size: int, max_position: int):
|
| 64 |
+
sign = torch.sign(relative_pos)
|
| 65 |
+
mid = bucket_size // 2
|
| 66 |
+
abs_pos = torch.where(
|
| 67 |
+
(relative_pos < mid) & (relative_pos > -mid),
|
| 68 |
+
torch.tensor(mid - 1).type_as(relative_pos),
|
| 69 |
+
torch.abs(relative_pos),
|
| 70 |
+
)
|
| 71 |
+
log_pos = (
|
| 72 |
+
torch.ceil(torch.log(abs_pos / mid) / torch.log(torch.tensor((max_position - 1) / mid)) * (mid - 1)) + mid
|
| 73 |
+
)
|
| 74 |
+
bucket_pos = torch.where(abs_pos <= mid, relative_pos.type_as(log_pos), log_pos * sign)
|
| 75 |
+
return bucket_pos
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def build_relative_position(query_layer, key_layer, bucket_size: int = -1, max_position: int = -1):
|
| 79 |
+
"""
|
| 80 |
+
Build relative position according to the query and key
|
| 81 |
+
|
| 82 |
+
We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
|
| 83 |
+
\\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -
|
| 84 |
+
P_k\\)
|
| 85 |
+
|
| 86 |
+
Args:
|
| 87 |
+
query_size (int): the length of query
|
| 88 |
+
key_size (int): the length of key
|
| 89 |
+
bucket_size (int): the size of position bucket
|
| 90 |
+
max_position (int): the maximum allowed absolute position
|
| 91 |
+
device (`torch.device`): the device on which tensors will be created.
|
| 92 |
+
|
| 93 |
+
Return:
|
| 94 |
+
`torch.LongTensor`: A tensor with shape [1, query_size, key_size]
|
| 95 |
+
"""
|
| 96 |
+
query_size = query_layer.size(-2)
|
| 97 |
+
key_size = key_layer.size(-2)
|
| 98 |
+
|
| 99 |
+
q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device)
|
| 100 |
+
k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.device)
|
| 101 |
+
rel_pos_ids = q_ids[:, None] - k_ids[None, :]
|
| 102 |
+
if bucket_size > 0 and max_position > 0:
|
| 103 |
+
rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
|
| 104 |
+
rel_pos_ids = rel_pos_ids.to(torch.long)
|
| 105 |
+
rel_pos_ids = rel_pos_ids[:query_size, :]
|
| 106 |
+
rel_pos_ids = rel_pos_ids.unsqueeze(0)
|
| 107 |
+
return rel_pos_ids
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@torch.jit.script
|
| 111 |
+
# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand
|
| 112 |
+
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
|
| 113 |
+
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@torch.jit.script
|
| 117 |
+
# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand
|
| 118 |
+
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
|
| 119 |
+
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
@torch.jit.script
|
| 123 |
+
# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand
|
| 124 |
+
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
|
| 125 |
+
return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
@torch.jit.script
|
| 129 |
+
def scaled_size_sqrt(query_layer: torch.Tensor, scale_factor: int):
|
| 130 |
+
return torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
@torch.jit.script
|
| 134 |
+
def build_rpos(query_layer, key_layer, relative_pos, position_buckets: int, max_relative_positions: int):
|
| 135 |
+
if key_layer.size(-2) != query_layer.size(-2):
|
| 136 |
+
return build_relative_position(
|
| 137 |
+
key_layer,
|
| 138 |
+
key_layer,
|
| 139 |
+
bucket_size=position_buckets,
|
| 140 |
+
max_position=max_relative_positions,
|
| 141 |
+
)
|
| 142 |
+
else:
|
| 143 |
+
return relative_pos
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class DisentangledSelfAttention(nn.Module):
|
| 147 |
+
"""
|
| 148 |
+
Disentangled self-attention module
|
| 149 |
+
|
| 150 |
+
Parameters:
|
| 151 |
+
config (`DebertaV2Config`):
|
| 152 |
+
A model config class instance with the configuration to build a new model. The schema is similar to
|
| 153 |
+
*BertConfig*, for more details, please refer [`DebertaV2Config`]
|
| 154 |
+
|
| 155 |
+
"""
|
| 156 |
+
|
| 157 |
+
def __init__(self, config):
|
| 158 |
+
super().__init__()
|
| 159 |
+
if config.hidden_size % config.num_attention_heads != 0:
|
| 160 |
+
raise ValueError(
|
| 161 |
+
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
|
| 162 |
+
f"heads ({config.num_attention_heads})"
|
| 163 |
+
)
|
| 164 |
+
self.num_attention_heads = config.num_attention_heads
|
| 165 |
+
_attention_head_size = config.hidden_size // config.num_attention_heads
|
| 166 |
+
self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
|
| 167 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
| 168 |
+
self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
|
| 169 |
+
self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
|
| 170 |
+
self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
|
| 171 |
+
|
| 172 |
+
self.share_att_key = getattr(config, "share_att_key", False)
|
| 173 |
+
self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
|
| 174 |
+
self.relative_attention = getattr(config, "relative_attention", False)
|
| 175 |
+
|
| 176 |
+
if self.relative_attention:
|
| 177 |
+
self.position_buckets = getattr(config, "position_buckets", -1)
|
| 178 |
+
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
|
| 179 |
+
if self.max_relative_positions < 1:
|
| 180 |
+
self.max_relative_positions = config.max_position_embeddings
|
| 181 |
+
self.pos_ebd_size = self.max_relative_positions
|
| 182 |
+
if self.position_buckets > 0:
|
| 183 |
+
self.pos_ebd_size = self.position_buckets
|
| 184 |
+
|
| 185 |
+
self.pos_dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 186 |
+
|
| 187 |
+
if not self.share_att_key:
|
| 188 |
+
if "c2p" in self.pos_att_type:
|
| 189 |
+
self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
|
| 190 |
+
if "p2c" in self.pos_att_type:
|
| 191 |
+
self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)
|
| 192 |
+
|
| 193 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
| 194 |
+
|
| 195 |
+
def transpose_for_scores(self, x, attention_heads) -> torch.Tensor:
|
| 196 |
+
new_x_shape = x.size()[:-1] + (attention_heads, -1)
|
| 197 |
+
x = x.view(new_x_shape)
|
| 198 |
+
return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))
|
| 199 |
+
|
| 200 |
+
def forward(
|
| 201 |
+
self,
|
| 202 |
+
hidden_states,
|
| 203 |
+
attention_mask,
|
| 204 |
+
output_attentions=False,
|
| 205 |
+
query_states=None,
|
| 206 |
+
relative_pos=None,
|
| 207 |
+
rel_embeddings=None,
|
| 208 |
+
):
|
| 209 |
+
"""
|
| 210 |
+
Call the module
|
| 211 |
+
|
| 212 |
+
Args:
|
| 213 |
+
hidden_states (`torch.FloatTensor`):
|
| 214 |
+
Input states to the module usually the output from previous layer, it will be the Q,K and V in
|
| 215 |
+
*Attention(Q,K,V)*
|
| 216 |
+
|
| 217 |
+
attention_mask (`torch.BoolTensor`):
|
| 218 |
+
An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
|
| 219 |
+
sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
|
| 220 |
+
th token.
|
| 221 |
+
|
| 222 |
+
output_attentions (`bool`, *optional*):
|
| 223 |
+
Whether return the attention matrix.
|
| 224 |
+
|
| 225 |
+
query_states (`torch.FloatTensor`, *optional*):
|
| 226 |
+
The *Q* state in *Attention(Q,K,V)*.
|
| 227 |
+
|
| 228 |
+
relative_pos (`torch.LongTensor`):
|
| 229 |
+
The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
|
| 230 |
+
values ranging in [*-max_relative_positions*, *max_relative_positions*].
|
| 231 |
+
|
| 232 |
+
rel_embeddings (`torch.FloatTensor`):
|
| 233 |
+
The embedding of relative distances. It's a tensor of shape [\\(2 \\times
|
| 234 |
+
\\text{max_relative_positions}\\), *hidden_size*].
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
"""
|
| 238 |
+
if query_states is None:
|
| 239 |
+
query_states = hidden_states
|
| 240 |
+
query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
|
| 241 |
+
key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
|
| 242 |
+
value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
|
| 243 |
+
|
| 244 |
+
rel_att = None
|
| 245 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
| 246 |
+
scale_factor = 1
|
| 247 |
+
if "c2p" in self.pos_att_type:
|
| 248 |
+
scale_factor += 1
|
| 249 |
+
if "p2c" in self.pos_att_type:
|
| 250 |
+
scale_factor += 1
|
| 251 |
+
scale = scaled_size_sqrt(query_layer, scale_factor)
|
| 252 |
+
attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2) / scale.to(dtype=query_layer.dtype))
|
| 253 |
+
if self.relative_attention:
|
| 254 |
+
rel_embeddings = self.pos_dropout(rel_embeddings)
|
| 255 |
+
rel_att = self.disentangled_attention_bias(
|
| 256 |
+
query_layer, key_layer, relative_pos, rel_embeddings, scale_factor
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
if rel_att is not None:
|
| 260 |
+
attention_scores = attention_scores + rel_att
|
| 261 |
+
attention_scores = attention_scores
|
| 262 |
+
attention_scores = attention_scores.view(
|
| 263 |
+
-1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
attention_mask = attention_mask.bool()
|
| 267 |
+
attention_scores = attention_scores.masked_fill(~(attention_mask), torch.finfo(query_layer.dtype).min)
|
| 268 |
+
# bsz x height x length x dimension
|
| 269 |
+
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
| 270 |
+
|
| 271 |
+
attention_probs = self.dropout(attention_probs)
|
| 272 |
+
context_layer = torch.bmm(
|
| 273 |
+
attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer
|
| 274 |
+
)
|
| 275 |
+
context_layer = (
|
| 276 |
+
context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))
|
| 277 |
+
.permute(0, 2, 1, 3)
|
| 278 |
+
.contiguous()
|
| 279 |
+
)
|
| 280 |
+
new_context_layer_shape = context_layer.size()[:-2] + (-1,)
|
| 281 |
+
context_layer = context_layer.view(new_context_layer_shape)
|
| 282 |
+
if not output_attentions:
|
| 283 |
+
return (context_layer, None)
|
| 284 |
+
return (context_layer, attention_probs)
|
| 285 |
+
|
| 286 |
+
def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
|
| 287 |
+
if relative_pos is None:
|
| 288 |
+
relative_pos = build_relative_position(
|
| 289 |
+
query_layer,
|
| 290 |
+
key_layer,
|
| 291 |
+
bucket_size=self.position_buckets,
|
| 292 |
+
max_position=self.max_relative_positions,
|
| 293 |
+
)
|
| 294 |
+
if relative_pos.dim() == 2:
|
| 295 |
+
relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
|
| 296 |
+
elif relative_pos.dim() == 3:
|
| 297 |
+
relative_pos = relative_pos.unsqueeze(1)
|
| 298 |
+
# bsz x height x query x key
|
| 299 |
+
elif relative_pos.dim() != 4:
|
| 300 |
+
raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
|
| 301 |
+
|
| 302 |
+
att_span = self.pos_ebd_size
|
| 303 |
+
relative_pos = relative_pos.long().to(query_layer.device)
|
| 304 |
+
|
| 305 |
+
rel_embeddings = rel_embeddings[0 : att_span * 2, :].unsqueeze(0)
|
| 306 |
+
if self.share_att_key:
|
| 307 |
+
pos_query_layer = self.transpose_for_scores(
|
| 308 |
+
self.query_proj(rel_embeddings), self.num_attention_heads
|
| 309 |
+
).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
|
| 310 |
+
pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(
|
| 311 |
+
query_layer.size(0) // self.num_attention_heads, 1, 1
|
| 312 |
+
)
|
| 313 |
+
else:
|
| 314 |
+
if "c2p" in self.pos_att_type:
|
| 315 |
+
pos_key_layer = self.transpose_for_scores(
|
| 316 |
+
self.pos_key_proj(rel_embeddings), self.num_attention_heads
|
| 317 |
+
).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1)
|
| 318 |
+
if "p2c" in self.pos_att_type:
|
| 319 |
+
pos_query_layer = self.transpose_for_scores(
|
| 320 |
+
self.pos_query_proj(rel_embeddings), self.num_attention_heads
|
| 321 |
+
).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1)
|
| 322 |
+
|
| 323 |
+
score = 0
|
| 324 |
+
# content->position
|
| 325 |
+
if "c2p" in self.pos_att_type:
|
| 326 |
+
scale = scaled_size_sqrt(pos_key_layer, scale_factor)
|
| 327 |
+
c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
|
| 328 |
+
c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
|
| 329 |
+
c2p_att = torch.gather(
|
| 330 |
+
c2p_att,
|
| 331 |
+
dim=-1,
|
| 332 |
+
index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),
|
| 333 |
+
)
|
| 334 |
+
score += c2p_att / scale.to(dtype=c2p_att.dtype)
|
| 335 |
+
|
| 336 |
+
# position->content
|
| 337 |
+
if "p2c" in self.pos_att_type:
|
| 338 |
+
scale = scaled_size_sqrt(pos_query_layer, scale_factor)
|
| 339 |
+
r_pos = build_rpos(
|
| 340 |
+
query_layer,
|
| 341 |
+
key_layer,
|
| 342 |
+
relative_pos,
|
| 343 |
+
self.max_relative_positions,
|
| 344 |
+
self.position_buckets,
|
| 345 |
+
)
|
| 346 |
+
p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
|
| 347 |
+
p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
|
| 348 |
+
p2c_att = torch.gather(
|
| 349 |
+
p2c_att,
|
| 350 |
+
dim=-1,
|
| 351 |
+
index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),
|
| 352 |
+
).transpose(-1, -2)
|
| 353 |
+
score += p2c_att / scale.to(dtype=p2c_att.dtype)
|
| 354 |
+
|
| 355 |
+
return score
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2
|
| 359 |
+
class DebertaV2Attention(nn.Module):
|
| 360 |
+
def __init__(self, config):
|
| 361 |
+
super().__init__()
|
| 362 |
+
self.self = DisentangledSelfAttention(config)
|
| 363 |
+
self.output = DebertaV2SelfOutput(config)
|
| 364 |
+
self.config = config
|
| 365 |
+
|
| 366 |
+
def forward(
|
| 367 |
+
self,
|
| 368 |
+
hidden_states,
|
| 369 |
+
attention_mask,
|
| 370 |
+
output_attentions: bool = False,
|
| 371 |
+
query_states=None,
|
| 372 |
+
relative_pos=None,
|
| 373 |
+
rel_embeddings=None,
|
| 374 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
|
| 375 |
+
self_output, att_matrix = self.self(
|
| 376 |
+
hidden_states,
|
| 377 |
+
attention_mask,
|
| 378 |
+
output_attentions,
|
| 379 |
+
query_states=query_states,
|
| 380 |
+
relative_pos=relative_pos,
|
| 381 |
+
rel_embeddings=rel_embeddings,
|
| 382 |
+
)
|
| 383 |
+
if query_states is None:
|
| 384 |
+
query_states = hidden_states
|
| 385 |
+
attention_output = self.output(self_output, query_states)
|
| 386 |
+
|
| 387 |
+
if output_attentions:
|
| 388 |
+
return (attention_output, att_matrix)
|
| 389 |
+
else:
|
| 390 |
+
return (attention_output, None)
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2
class DebertaV2Intermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm
class DebertaV2Output(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2
class DebertaV2Layer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attention = DebertaV2Attention(config)
        self.intermediate = DebertaV2Intermediate(config)
        self.output = DebertaV2Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        attention_output, att_matrix = self.attention(
            hidden_states,
            attention_mask,
            output_attentions=output_attentions,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)

        if output_attentions:
            return (layer_output, att_matrix)
        else:
            return (layer_output, None)

class ConvLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        kernel_size = getattr(config, "conv_kernel_size", 3)
        groups = getattr(config, "conv_groups", 1)
        self.conv_act = getattr(config, "conv_act", "tanh")
        self.conv = nn.Conv1d(
            config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
        )
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, residual_states, input_mask):
        out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
        rmask = (1 - input_mask).bool()
        out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
        out = ACT2FN[self.conv_act](self.dropout(out))

        layer_norm_input = residual_states + out
        output = self.LayerNorm(layer_norm_input).to(layer_norm_input)

        if input_mask is None:
            output_states = output
        else:
            if input_mask.dim() != layer_norm_input.dim():
                if input_mask.dim() == 4:
                    input_mask = input_mask.squeeze(1).squeeze(1)
                input_mask = input_mask.unsqueeze(2)

            input_mask = input_mask.to(output.dtype)
            output_states = output * input_mask

        return output_states

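# --- Editor's illustrative sketch (not part of the original library file) ----
# ConvLayer above runs nn.Conv1d over the sequence dimension. Conv1d expects
# (batch, channels, length) while hidden states are (batch, length, hidden),
# hence the permute(0, 2, 1) before and after the convolution. Toy sizes below
# are assumptions for illustration only.
def _sketch_conv_over_sequence():
    import torch
    from torch import nn

    batch, seq_len, hidden, kernel_size = 2, 16, 32, 3
    hidden_states = torch.randn(batch, seq_len, hidden)
    conv = nn.Conv1d(hidden, hidden, kernel_size, padding=(kernel_size - 1) // 2)

    out = conv(hidden_states.permute(0, 2, 1)).permute(0, 2, 1)
    return out.shape  # torch.Size([2, 16, 32]) -- same (batch, seq, hidden) layout
# -----------------------------------------------------------------------------
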
# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm,Deberta->DebertaV2
class DebertaV2Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        pad_token_id = getattr(config, "pad_token_id", 0)
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)

        self.position_biased_input = getattr(config, "position_biased_input", True)
        if not self.position_biased_input:
            self.position_embeddings = None
        else:
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)

        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
        else:
            self.token_type_embeddings = None

        if self.embedding_size != config.hidden_size:
            self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
        else:
            self.embed_proj = None

        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.position_embeddings is not None:
            position_embeddings = self.position_embeddings(position_ids.long())
        else:
            position_embeddings = torch.zeros_like(inputs_embeds)

        embeddings = inputs_embeds
        if self.position_biased_input:
            embeddings += position_embeddings
        if self.token_type_embeddings is not None:
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings

        if self.embed_proj is not None:
            embeddings = self.embed_proj(embeddings)

        embeddings = self.LayerNorm(embeddings)

        if mask is not None:
            if mask.dim() != embeddings.dim():
                if mask.dim() == 4:
                    mask = mask.squeeze(1).squeeze(1)
                mask = mask.unsqueeze(2)
            mask = mask.to(embeddings.dtype)

            embeddings = embeddings * mask

        embeddings = self.dropout(embeddings)
        return embeddings

class DebertaV2Encoder(nn.Module):
    """Modified BertEncoder with relative position bias support"""

    def __init__(self, config):
        super().__init__()

        self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
        self.relative_attention = getattr(config, "relative_attention", False)

        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings

            self.position_buckets = getattr(config, "position_buckets", -1)
            pos_ebd_size = self.max_relative_positions * 2

            if self.position_buckets > 0:
                pos_ebd_size = self.position_buckets * 2

            self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)

        self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]

        if "layer_norm" in self.norm_rel_ebd:
            self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)

        self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
        self.gradient_checkpointing = False

    def get_rel_embedding(self):
        rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
        if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
            rel_embeddings = self.LayerNorm(rel_embeddings)
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        if attention_mask.dim() <= 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
        elif attention_mask.dim() == 3:
            attention_mask = attention_mask.unsqueeze(1)

        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        if self.relative_attention and relative_pos is None:
            if query_states is not None:
                relative_pos = build_relative_position(
                    query_states,
                    hidden_states,
                    bucket_size=self.position_buckets,
                    max_position=self.max_relative_positions,
                )
            else:
                relative_pos = build_relative_position(
                    hidden_states,
                    hidden_states,
                    bucket_size=self.position_buckets,
                    max_position=self.max_relative_positions,
                )
        return relative_pos

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_hidden_states=True,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        return_dict=True,
    ):
        if attention_mask.dim() <= 2:
            input_mask = attention_mask
        else:
            input_mask = attention_mask.sum(-2) > 0
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)

        all_hidden_states: Optional[Tuple[torch.Tensor]] = (hidden_states,) if output_hidden_states else None
        all_attentions = () if output_attentions else None

        next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        for i, layer_module in enumerate(self.layer):
            if self.gradient_checkpointing and self.training:
                output_states, attn_weights = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    next_kv,
                    attention_mask,
                    query_states,
                    relative_pos,
                    rel_embeddings,
                    output_attentions,
                )
            else:
                output_states, attn_weights = layer_module(
                    next_kv,
                    attention_mask,
                    query_states=query_states,
                    relative_pos=relative_pos,
                    rel_embeddings=rel_embeddings,
                    output_attentions=output_attentions,
                )

            if output_attentions:
                all_attentions = all_attentions + (attn_weights,)

            if i == 0 and self.conv is not None:
                output_states = self.conv(hidden_states, output_states, input_mask)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (output_states,)

            if query_states is not None:
                query_states = output_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = output_states

        if not return_dict:
            return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
        )

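# --- Editor's illustrative sketch (not part of the original library file) ----
# DebertaV2Encoder.get_attention_mask above turns a 2D padding mask of shape
# (batch, seq) into a 4D pairwise mask of shape (batch, 1, seq, seq): a
# (query, key) pair is kept only when both tokens are real. The values are toy
# assumptions for illustration only.
def _sketch_pairwise_attention_mask():
    import torch

    attention_mask = torch.tensor([[1, 1, 1, 0]])  # one sequence, last token is padding
    extended = attention_mask.unsqueeze(1).unsqueeze(2)  # (1, 1, 1, 4)
    pairwise = extended * extended.squeeze(-2).unsqueeze(-1)  # (1, 1, 4, 4)
    return pairwise  # the row and column of the padded token are zeroed out
# -----------------------------------------------------------------------------
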
# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2
class DebertaV2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DebertaV2Config
    base_model_prefix = "deberta"
    _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

DEBERTA_START_DOCSTRING = r"""
    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those
    two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.


    Parameters:
        config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

DEBERTA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2
class DebertaV2Model(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = DebertaV2Embeddings(config)
        self.encoder = DebertaV2Encoder(config)
        self.z_steps = 0
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError("The prune function is not implemented in DeBERTa model.")

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            mask=attention_mask,
            inputs_embeds=inputs_embeds,
        )

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        encoded_layers = encoder_outputs[1]

        if self.z_steps > 1:
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            for layer in layers[1:]:
                query_states = layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=False,
                    query_states=query_states,
                    relative_pos=rel_pos,
                    rel_embeddings=rel_embeddings,
                )
                encoded_layers.append(query_states)

        sequence_output = encoded_layers[-1]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
            attentions=encoder_outputs.attentions,
        )

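# --- Editor's illustrative sketch (not part of the original library file) ----
# A minimal usage sketch for the bare DebertaV2Model defined above, assuming the
# `transformers` package is installed and the "microsoft/deberta-v3-base"
# checkpoint is available; swap in whichever DeBERTa-v2/v3 checkpoint you use.
def _sketch_deberta_v2_forward():
    from transformers import AutoTokenizer, DebertaV2Model

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
    model = DebertaV2Model.from_pretrained("microsoft/deberta-v3-base")

    inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state.shape  # (batch, sequence_length, hidden_size)
# -----------------------------------------------------------------------------
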
# Copied from transformers.models.deberta.modeling_deberta.LegacyDebertaPredictionHeadTransform with Deberta->DebertaV2
class LegacyDebertaV2PredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.dense = nn.Linear(config.hidden_size, self.embedding_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class LegacyDebertaV2LMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = LegacyDebertaV2PredictionHeadTransform(config)

        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def _tie_weights(self):
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class LegacyDebertaV2OnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = LegacyDebertaV2LMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class DebertaV2LMPredictionHead(nn.Module):
    """https://github.com/microsoft/DeBERTa/blob/master/DeBERTa/deberta/bert.py#L270"""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=True)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    # note that the input embeddings must be passed as an argument
    def forward(self, hidden_states, word_embeddings):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        hidden_states = torch.matmul(hidden_states, word_embeddings.weight.t()) + self.bias
        return hidden_states


class DebertaV2OnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.lm_head = DebertaV2LMPredictionHead(config)

    # note that the input embeddings must be passed as an argument
    def forward(self, sequence_output, word_embeddings):
        prediction_scores = self.lm_head(sequence_output, word_embeddings)
        return prediction_scores

@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
|
| 1000 |
+
class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
|
| 1001 |
+
_tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
|
| 1002 |
+
_keys_to_ignore_on_load_unexpected = r"mask_predictions.*"
|
| 1003 |
+
|
| 1004 |
+
def __init__(self, config):
|
| 1005 |
+
super().__init__(config)
|
| 1006 |
+
self.legacy = config.legacy
|
| 1007 |
+
self.deberta = DebertaV2Model(config)
|
| 1008 |
+
if self.legacy:
|
| 1009 |
+
self.cls = LegacyDebertaV2OnlyMLMHead(config)
|
| 1010 |
+
else:
|
| 1011 |
+
self._tied_weights_keys = ["lm_predictions.lm_head.weight", "deberta.embeddings.word_embeddings.weight"]
|
| 1012 |
+
self.lm_predictions = DebertaV2OnlyMLMHead(config)
|
| 1013 |
+
# Initialize weights and apply final processing
|
| 1014 |
+
self.post_init()
|
| 1015 |
+
|
| 1016 |
+
def get_output_embeddings(self):
|
| 1017 |
+
if self.legacy:
|
| 1018 |
+
return self.cls.predictions.decoder
|
| 1019 |
+
else:
|
| 1020 |
+
return self.lm_predictions.lm_head.dense
|
| 1021 |
+
|
| 1022 |
+
def set_output_embeddings(self, new_embeddings):
|
| 1023 |
+
if self.legacy:
|
| 1024 |
+
self.cls.predictions.decoder = new_embeddings
|
| 1025 |
+
self.cls.predictions.bias = new_embeddings.bias
|
| 1026 |
+
else:
|
| 1027 |
+
self.lm_predictions.lm_head.dense = new_embeddings
|
| 1028 |
+
self.lm_predictions.lm_head.bias = new_embeddings.bias
|
| 1029 |
+
|
| 1030 |
+
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 1031 |
+
@add_code_sample_docstrings(
|
| 1032 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1033 |
+
output_type=MaskedLMOutput,
|
| 1034 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1035 |
+
mask="[MASK]",
|
| 1036 |
+
)
|
| 1037 |
+
# Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM.forward with Deberta->DebertaV2
|
| 1038 |
+
def forward(
|
| 1039 |
+
self,
|
| 1040 |
+
input_ids: Optional[torch.Tensor] = None,
|
| 1041 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1042 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
| 1043 |
+
position_ids: Optional[torch.Tensor] = None,
|
| 1044 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 1045 |
+
labels: Optional[torch.Tensor] = None,
|
| 1046 |
+
output_attentions: Optional[bool] = None,
|
| 1047 |
+
output_hidden_states: Optional[bool] = None,
|
| 1048 |
+
return_dict: Optional[bool] = None,
|
| 1049 |
+
) -> Union[Tuple, MaskedLMOutput]:
|
| 1050 |
+
r"""
|
| 1051 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1052 |
+
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
|
| 1053 |
+
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
|
| 1054 |
+
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
|
| 1055 |
+
"""
|
| 1056 |
+
|
| 1057 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1058 |
+
|
| 1059 |
+
outputs = self.deberta(
|
| 1060 |
+
input_ids,
|
| 1061 |
+
attention_mask=attention_mask,
|
| 1062 |
+
token_type_ids=token_type_ids,
|
| 1063 |
+
position_ids=position_ids,
|
| 1064 |
+
inputs_embeds=inputs_embeds,
|
| 1065 |
+
output_attentions=output_attentions,
|
| 1066 |
+
output_hidden_states=output_hidden_states,
|
| 1067 |
+
return_dict=return_dict,
|
| 1068 |
+
)
|
| 1069 |
+
|
| 1070 |
+
sequence_output = outputs[0]
|
| 1071 |
+
if self.legacy:
|
| 1072 |
+
prediction_scores = self.cls(sequence_output)
|
| 1073 |
+
else:
|
| 1074 |
+
prediction_scores = self.lm_predictions(sequence_output, self.deberta.embeddings.word_embeddings)
|
| 1075 |
+
|
| 1076 |
+
masked_lm_loss = None
|
| 1077 |
+
if labels is not None:
|
| 1078 |
+
loss_fct = CrossEntropyLoss() # -100 index = padding token
|
| 1079 |
+
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
|
| 1080 |
+
|
| 1081 |
+
if not return_dict:
|
| 1082 |
+
output = (prediction_scores,) + outputs[1:]
|
| 1083 |
+
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
|
| 1084 |
+
|
| 1085 |
+
return MaskedLMOutput(
|
| 1086 |
+
loss=masked_lm_loss,
|
| 1087 |
+
logits=prediction_scores,
|
| 1088 |
+
hidden_states=outputs.hidden_states,
|
| 1089 |
+
attentions=outputs.attentions,
|
| 1090 |
+
)
|
| 1091 |
+
|
| 1092 |
+
|
| 1093 |
+
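# --- Editor's illustrative sketch (not part of the original library file) ----
# Sketch of DebertaV2ForMaskedLM defined above: predict the token behind [MASK].
# The checkpoint name is an assumption; any DeBERTa-v2/v3 checkpoint with a
# masked-LM head would do.
def _sketch_masked_lm():
    import torch
    from transformers import AutoTokenizer, DebertaV2ForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
    model = DebertaV2ForMaskedLM.from_pretrained("microsoft/deberta-v3-base")

    inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits

    mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
    predicted_id = logits[0, mask_index].argmax(dim=-1)
    return tokenizer.decode(predicted_id)
# -----------------------------------------------------------------------------
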
# Copied from transformers.models.deberta.modeling_deberta.ContextPooler
class ContextPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
        self.dropout = nn.Dropout(config.pooler_dropout)
        self.config = config

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.

        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token)
        pooled_output = self.dense(context_token)
        pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
        return pooled_output

    @property
    def output_dim(self):
        return self.config.hidden_size

@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels

        self.deberta = DebertaV2Model(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim

        self.classifier = nn.Linear(output_dim, num_labels)
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = nn.Dropout(drop_out)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    # Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification.forward with Deberta->DebertaV2
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    # regression task
                    loss_fn = nn.MSELoss()
                    logits = logits.view(-1).to(labels.dtype)
                    loss = loss_fn(logits, labels.view(-1))
                elif labels.dim() == 1 or labels.size(-1) == 1:
                    label_index = (labels >= 0).nonzero()
                    labels = labels.long()
                    if label_index.size(0) > 0:
                        labeled_logits = torch.gather(
                            logits, 0, label_index.expand(label_index.size(0), logits.size(1))
                        )
                        labels = torch.gather(labels, 0, label_index.view(-1))
                        loss_fct = CrossEntropyLoss()
                        loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
                    else:
                        loss = torch.tensor(0).to(logits)
                else:
                    log_softmax = nn.LogSoftmax(-1)
                    loss = -((log_softmax(logits) * labels).sum(-1)).mean()
            elif self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )

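# --- Editor's illustrative sketch (not part of the original library file) ----
# Sketch of DebertaV2ForSequenceClassification defined above, including how a
# label produces a loss. The checkpoint name and num_labels are assumptions.
def _sketch_sequence_classification():
    import torch
    from transformers import AutoTokenizer, DebertaV2ForSequenceClassification

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
    model = DebertaV2ForSequenceClassification.from_pretrained(
        "microsoft/deberta-v3-base", num_labels=2
    )

    inputs = tokenizer("A surprisingly enjoyable read.", return_tensors="pt")
    outputs = model(**inputs, labels=torch.tensor([1]))
    return outputs.loss, outputs.logits  # scalar loss and (batch, num_labels) logits
# -----------------------------------------------------------------------------
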
@add_start_docstrings(
    """
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2
class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaV2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )

@add_start_docstrings(
    """
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaV2Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        qa_target_start_index=_QA_TARGET_START_INDEX,
        qa_target_end_index=_QA_TARGET_END_INDEX,
    )
    # Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering.forward with Deberta->DebertaV2
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

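# --- Editor's illustrative sketch (not part of the original library file) ----
# Sketch of DebertaV2ForQuestionAnswering defined above: the start/end logits
# are argmax-ed and decoded back to an answer span. The checkpoint name is an
# assumption; a checkpoint actually fine-tuned on SQuAD would give meaningful spans.
def _sketch_question_answering():
    import torch
    from transformers import AutoTokenizer, DebertaV2ForQuestionAnswering

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
    model = DebertaV2ForQuestionAnswering.from_pretrained("microsoft/deberta-v3-base")

    question = "Who proposed DeBERTa?"
    context = "DeBERTa was proposed by researchers at Microsoft."
    inputs = tokenizer(question, context, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    start = outputs.start_logits.argmax(dim=-1).item()
    end = outputs.end_logits.argmax(dim=-1).item()
    return tokenizer.decode(inputs.input_ids[0, start : end + 1])
# -----------------------------------------------------------------------------
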
@add_start_docstrings(
    """
    DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForMultipleChoice(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels

        self.deberta = DebertaV2Model(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim

        self.classifier = nn.Linear(output_dim, 1)
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = nn.Dropout(drop_out)

        self.init_weights()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.deberta(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

__all__ = [
    "DebertaV2ForMaskedLM",
    "DebertaV2ForMultipleChoice",
    "DebertaV2ForQuestionAnswering",
    "DebertaV2ForSequenceClassification",
    "DebertaV2ForTokenClassification",
    "DebertaV2Model",
    "DebertaV2PreTrainedModel",
]
janus/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py ADDED
@@ -0,0 +1,223 @@
# coding=utf-8
# Copyright 2020 Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization class for model DeBERTa."""

import os
from shutil import copyfile
from typing import Optional, Tuple

from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if is_sentencepiece_available():
    from .tokenization_deberta_v2 import DebertaV2Tokenizer
else:
    DebertaV2Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"}


class DebertaV2TokenizerFast(PreTrainedTokenizerFast):
    r"""
    Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        bos_token (`string`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier
            token. When building a sequence using special tokens, this is not the token that is used for the beginning
            of sequence. The token used is the `cls_token`.
        eos_token (`string`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token. When building a sequence using special tokens, this is not the token that is
            used for the end of sequence. The token used is the `sep_token`.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = DebertaV2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        split_by_punct=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            split_by_punct=split_by_punct,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A DeBERTa sequence has the following format:

        - single sequence: [CLS] X [SEP]
        - pair of sequences: [CLS] A [SEP] B [SEP]

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
|
| 138 |
+
Optional second list of IDs for sequence pairs.
|
| 139 |
+
|
| 140 |
+
Returns:
|
| 141 |
+
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
| 142 |
+
"""
|
| 143 |
+
|
| 144 |
+
if token_ids_1 is None:
|
| 145 |
+
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
|
| 146 |
+
cls = [self.cls_token_id]
|
| 147 |
+
sep = [self.sep_token_id]
|
| 148 |
+
return cls + token_ids_0 + sep + token_ids_1 + sep
|
| 149 |
+
|
| 150 |
+
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
|
| 151 |
+
"""
|
| 152 |
+
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
|
| 153 |
+
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
|
| 154 |
+
|
| 155 |
+
Args:
|
| 156 |
+
token_ids_0 (`List[int]`):
|
| 157 |
+
List of IDs.
|
| 158 |
+
token_ids_1 (`List[int]`, *optional*):
|
| 159 |
+
Optional second list of IDs for sequence pairs.
|
| 160 |
+
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
|
| 161 |
+
Whether or not the token list is already formatted with special tokens for the model.
|
| 162 |
+
|
| 163 |
+
Returns:
|
| 164 |
+
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
| 165 |
+
"""
|
| 166 |
+
|
| 167 |
+
if already_has_special_tokens:
|
| 168 |
+
return super().get_special_tokens_mask(
|
| 169 |
+
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
if token_ids_1 is not None:
|
| 173 |
+
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
|
| 174 |
+
return [1] + ([0] * len(token_ids_0)) + [1]
|
| 175 |
+
|
| 176 |
+
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
|
| 177 |
+
"""
|
| 178 |
+
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
|
| 179 |
+
sequence pair mask has the following format:
|
| 180 |
+
|
| 181 |
+
```
|
| 182 |
+
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
|
| 183 |
+
| first sequence | second sequence |
|
| 184 |
+
```
|
| 185 |
+
|
| 186 |
+
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
|
| 187 |
+
|
| 188 |
+
Args:
|
| 189 |
+
token_ids_0 (`List[int]`):
|
| 190 |
+
List of IDs.
|
| 191 |
+
token_ids_1 (`List[int]`, *optional*):
|
| 192 |
+
Optional second list of IDs for sequence pairs.
|
| 193 |
+
|
| 194 |
+
Returns:
|
| 195 |
+
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
|
| 196 |
+
"""
|
| 197 |
+
sep = [self.sep_token_id]
|
| 198 |
+
cls = [self.cls_token_id]
|
| 199 |
+
if token_ids_1 is None:
|
| 200 |
+
return len(cls + token_ids_0 + sep) * [0]
|
| 201 |
+
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
|
| 202 |
+
|
| 203 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
| 204 |
+
if not self.can_save_slow_tokenizer:
|
| 205 |
+
raise ValueError(
|
| 206 |
+
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
|
| 207 |
+
"tokenizer."
|
| 208 |
+
)
|
| 209 |
+
|
| 210 |
+
if not os.path.isdir(save_directory):
|
| 211 |
+
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
| 212 |
+
return
|
| 213 |
+
out_vocab_file = os.path.join(
|
| 214 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
|
| 218 |
+
copyfile(self.vocab_file, out_vocab_file)
|
| 219 |
+
|
| 220 |
+
return (out_vocab_file,)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
__all__ = ["DebertaV2TokenizerFast"]
|
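The three helper methods above fully determine the input layout. As a sanity check, here is a small standalone sketch (not part of the diff) that mirrors their logic with hypothetical IDs — `1` for `[CLS]`, `2` for `[SEP]` — so the single- and pair-sequence formats can be verified without loading a real SentencePiece model:

```python
# Standalone sketch of the DeBERTa special-token layout; IDs are made up for illustration.
CLS_ID, SEP_ID = 1, 2  # hypothetical placeholder IDs


def build_inputs(ids_0, ids_1=None):
    # [CLS] X [SEP]  or  [CLS] A [SEP] B [SEP]
    if ids_1 is None:
        return [CLS_ID] + ids_0 + [SEP_ID]
    return [CLS_ID] + ids_0 + [SEP_ID] + ids_1 + [SEP_ID]


def token_type_ids(ids_0, ids_1=None):
    # 0s cover [CLS] A [SEP]; 1s cover B [SEP]
    if ids_1 is None:
        return [0] * (len(ids_0) + 2)
    return [0] * (len(ids_0) + 2) + [1] * (len(ids_1) + 1)


a, b = [10, 11, 12], [20, 21]
assert build_inputs(a, b) == [1, 10, 11, 12, 2, 20, 21, 2]
assert token_type_ids(a, b) == [0, 0, 0, 0, 0, 1, 1, 1]
```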
janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__init__.py
ADDED
@@ -0,0 +1,27 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_dinov2_with_registers import *
    from .modeling_dinov2_with_registers import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
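Both this `__init__.py` and the identical `focalnet/__init__.py` further down defer every submodule import: at type-checking time the star imports are visible to static analyzers, while at runtime the module object is replaced by a `_LazyModule` that loads submodules only on first attribute access. Here is a minimal sketch of that idea using the standard PEP 562 module `__getattr__` hook rather than the actual `_LazyModule` API (the `_SUBMODULES` mapping below is illustrative only):

```python
# Sketch of lazy submodule loading for a package __init__.py (PEP 562).
# Heavy submodules are imported only when an attribute is first requested.
import importlib

_SUBMODULES = {"modeling": ".modeling_dinov2_with_registers"}  # illustrative mapping


def __getattr__(name):
    if name in _SUBMODULES:
        # Imported at most once; importlib caches the module in sys.modules.
        return importlib.import_module(_SUBMODULES[name], __package__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```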
janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/configuration_dinov2_with_registers.cpython-310.pyc
ADDED
Binary file (6.94 kB).
janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/modeling_dinov2_with_registers.cpython-310.pyc
ADDED
Binary file (30.8 kB).
janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/modular_dinov2_with_registers.cpython-310.pyc
ADDED
Binary file (14 kB).
janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py
ADDED
@@ -0,0 +1,159 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_dinov2_with_registers.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ...configuration_utils import PretrainedConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


class Dinov2WithRegistersConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to
    instantiate a Dinov2WithRegisters model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the DINOv2 with
    Registers [facebook/dinov2-with-registers-base](https://huggingface.co/facebook/dinov2-with-registers-base)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        mlp_ratio (`int`, *optional*, defaults to 4):
            Ratio of the hidden size of the MLPs relative to the `hidden_size`.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        layerscale_value (`float`, *optional*, defaults to 1.0):
            Initial value to use for layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
            Whether to use the SwiGLU feedforward neural network.
        num_register_tokens (`int`, *optional*, defaults to 4):
            Number of register tokens to use.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the same order as
            defined in the `stage_names` attribute.
        apply_layernorm (`bool`, *optional*, defaults to `True`):
            Whether to apply layer normalization to the feature maps in case the model is used as backbone.
        reshape_hidden_states (`bool`, *optional*, defaults to `True`):
            Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
            case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape
            `(batch_size, seq_len, hidden_size)`.

    Example:

    ```python
    >>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel

    >>> # Initializing a Dinov2WithRegisters base style configuration
    >>> configuration = Dinov2WithRegistersConfig()

    >>> # Initializing a model (with random weights) from the base style configuration
    >>> model = Dinov2WithRegistersModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "dinov2_with_registers"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        layerscale_value=1.0,
        drop_path_rate=0.0,
        use_swiglu_ffn=False,
        num_register_tokens=4,
        out_features=None,
        out_indices=None,
        apply_layernorm=True,
        reshape_hidden_states=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.layerscale_value = layerscale_value
        self.drop_path_rate = drop_path_rate
        self.use_swiglu_ffn = use_swiglu_ffn
        self.num_register_tokens = num_register_tokens
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
        self.apply_layernorm = apply_layernorm
        self.reshape_hidden_states = reshape_hidden_states


__all__ = ["Dinov2WithRegistersConfig"]
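The backbone plumbing at the end of `__init__` is easy to trace by hand. With the default `num_hidden_layers=12`, `stage_names` becomes `["stem", "stage1", ..., "stage12"]`, and `get_aligned_output_features_output_indices` fills in whichever of `out_features`/`out_indices` was left unset. A small standalone sketch of that alignment (a simplified stand-in for illustration, not the library implementation):

```python
# Reproduce the stage_names construction and sketch the out_features alignment.
num_hidden_layers = 12
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]

out_indices = [12]  # e.g. the default backbone behavior: last stage only
out_features = [stage_names[i] for i in out_indices]
print(out_features)  # ['stage12']
```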
janus/lib/python3.10/site-packages/transformers/models/focalnet/__init__.py
ADDED
@@ -0,0 +1,27 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_focalnet import *
    from .modeling_focalnet import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (543 Bytes).
janus/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/configuration_focalnet.cpython-310.pyc
ADDED
Binary file (7.17 kB).
janus/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/modeling_focalnet.cpython-310.pyc
ADDED
Binary file (32.5 kB).
janus/lib/python3.10/site-packages/transformers/models/focalnet/configuration_focalnet.py
ADDED
@@ -0,0 +1,164 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FocalNet model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FocalNetModel`]. It is used to instantiate a
    FocalNet model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the FocalNet
    [microsoft/focalnet-tiny](https://huggingface.co/microsoft/focalnet-tiny) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 4):
            The size (resolution) of each patch in the embeddings layer.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embed_dim (`int`, *optional*, defaults to 96):
            Dimensionality of patch embedding.
        use_conv_embed (`bool`, *optional*, defaults to `False`):
            Whether to use convolutional embedding. The authors noted that using convolutional embedding usually
            improves performance, but it is not used by default.
        hidden_sizes (`List[int]`, *optional*, defaults to `[192, 384, 768, 768]`):
            Dimensionality (hidden size) at each stage.
        depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
            Depth (number of layers) of each stage in the encoder.
        focal_levels (`list(int)`, *optional*, defaults to `[2, 2, 2, 2]`):
            Number of focal levels in each layer of the respective stages in the encoder.
        focal_windows (`list(int)`, *optional*, defaults to `[3, 3, 3, 3]`):
            Focal window size in each layer of the respective stages in the encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of MLP hidden dimensionality to embedding dimensionality.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate.
        use_layerscale (`bool`, *optional*, defaults to `False`):
            Whether to use layer scale in the encoder.
        layerscale_value (`float`, *optional*, defaults to 0.0001):
            The initial value of the layer scale.
        use_post_layernorm (`bool`, *optional*, defaults to `False`):
            Whether to use post layer normalization in the encoder.
        use_post_layernorm_in_modulation (`bool`, *optional*, defaults to `False`):
            Whether to use post layer normalization in the modulation layer.
        normalize_modulator (`bool`, *optional*, defaults to `False`):
            Whether to normalize the modulator.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        encoder_stride (`int`, *optional*, defaults to 32):
            Factor to increase the spatial resolution by in the decoder head for masked image modeling.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the same order as
            defined in the `stage_names` attribute.

    Example:

    ```python
    >>> from transformers import FocalNetConfig, FocalNetModel

    >>> # Initializing a FocalNet microsoft/focalnet-tiny style configuration
    >>> configuration = FocalNetConfig()

    >>> # Initializing a model (with random weights) from the microsoft/focalnet-tiny style configuration
    >>> model = FocalNetModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


__all__ = ["FocalNetConfig"]
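The defaults describe a four-stage hierarchical backbone, and the numbers can be cross-checked against the `_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]` quoted in `modeling_focalnet.py` below. A back-of-the-envelope sketch, assuming the usual 2x downsampling between consecutive stages of a hierarchical vision model:

```python
# Rough geometry check for the default (focalnet-tiny style) configuration.
image_size, patch_size = 224, 4
depths = [2, 2, 6, 2]

stem_resolution = image_size // patch_size                     # 224 / 4 = 56
final_resolution = stem_resolution // 2 ** (len(depths) - 1)   # three halvings -> 7
print(final_resolution**2)  # 49 tokens, matching [1, 49, 768]
```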
janus/lib/python3.10/site-packages/transformers/models/focalnet/modeling_focalnet.py
ADDED
|
@@ -0,0 +1,1038 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""PyTorch FocalNet model."""
|
| 16 |
+
|
| 17 |
+
import collections.abc
|
| 18 |
+
import math
|
| 19 |
+
from dataclasses import dataclass
|
| 20 |
+
from typing import Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
import torch.utils.checkpoint
|
| 24 |
+
from torch import nn
|
| 25 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 26 |
+
|
| 27 |
+
from ...activations import ACT2FN
|
| 28 |
+
from ...modeling_outputs import BackboneOutput
|
| 29 |
+
from ...modeling_utils import PreTrainedModel
|
| 30 |
+
from ...utils import (
|
| 31 |
+
ModelOutput,
|
| 32 |
+
add_code_sample_docstrings,
|
| 33 |
+
add_start_docstrings,
|
| 34 |
+
add_start_docstrings_to_model_forward,
|
| 35 |
+
logging,
|
| 36 |
+
replace_return_docstrings,
|
| 37 |
+
)
|
| 38 |
+
from ...utils.backbone_utils import BackboneMixin
|
| 39 |
+
from .configuration_focalnet import FocalNetConfig
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
logger = logging.get_logger(__name__)
|
| 43 |
+
|
| 44 |
+
# General docstring
|
| 45 |
+
_CONFIG_FOR_DOC = "FocalNetConfig"
|
| 46 |
+
|
| 47 |
+
# Base docstring
|
| 48 |
+
_CHECKPOINT_FOR_DOC = "microsoft/focalnet-tiny"
|
| 49 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
|
| 50 |
+
|
| 51 |
+
# Image classification docstring
|
| 52 |
+
_IMAGE_CLASS_CHECKPOINT = "microsoft/focalnet-tiny"
|
| 53 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@dataclass
|
| 57 |
+
class FocalNetEncoderOutput(ModelOutput):
|
| 58 |
+
"""
|
| 59 |
+
FocalNet encoder's outputs, with potential hidden states.
|
| 60 |
+
|
| 61 |
+
Args:
|
| 62 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 63 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 64 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 65 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
|
| 66 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
| 67 |
+
|
| 68 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
| 69 |
+
|
| 70 |
+
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 71 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
|
| 72 |
+
shape `(batch_size, hidden_size, height, width)`.
|
| 73 |
+
|
| 74 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
|
| 75 |
+
include the spatial dimensions.
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
last_hidden_state: torch.FloatTensor = None
|
| 79 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 80 |
+
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@dataclass
|
| 84 |
+
class FocalNetModelOutput(ModelOutput):
|
| 85 |
+
"""
|
| 86 |
+
FocalNet model's outputs that also contains a pooling of the last hidden states.
|
| 87 |
+
|
| 88 |
+
Args:
|
| 89 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 90 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 91 |
+
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
|
| 92 |
+
Average pooling of the last layer hidden-state.
|
| 93 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 94 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
|
| 95 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
| 96 |
+
|
| 97 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
| 98 |
+
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 99 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
|
| 100 |
+
shape `(batch_size, hidden_size, height, width)`.
|
| 101 |
+
|
| 102 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
|
| 103 |
+
include the spatial dimensions.
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
last_hidden_state: torch.FloatTensor = None
|
| 107 |
+
pooler_output: Optional[torch.FloatTensor] = None
|
| 108 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 109 |
+
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
@dataclass
|
| 113 |
+
class FocalNetMaskedImageModelingOutput(ModelOutput):
|
| 114 |
+
"""
|
| 115 |
+
FocalNet masked image model outputs.
|
| 116 |
+
|
| 117 |
+
Args:
|
| 118 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
|
| 119 |
+
Masked image modeling (MLM) loss.
|
| 120 |
+
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 121 |
+
Reconstructed pixel values.
|
| 122 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 123 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
|
| 124 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
| 125 |
+
|
| 126 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
| 127 |
+
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 128 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
|
| 129 |
+
shape `(batch_size, hidden_size, height, width)`.
|
| 130 |
+
|
| 131 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
|
| 132 |
+
include the spatial dimensions.
|
| 133 |
+
"""
|
| 134 |
+
|
| 135 |
+
loss: Optional[torch.FloatTensor] = None
|
| 136 |
+
reconstruction: torch.FloatTensor = None
|
| 137 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 138 |
+
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@dataclass
|
| 142 |
+
class FocalNetImageClassifierOutput(ModelOutput):
|
| 143 |
+
"""
|
| 144 |
+
FocalNet outputs for image classification.
|
| 145 |
+
|
| 146 |
+
Args:
|
| 147 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
| 148 |
+
Classification (or regression if config.num_labels==1) loss.
|
| 149 |
+
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
|
| 150 |
+
Classification (or regression if config.num_labels==1) scores (before SoftMax).
|
| 151 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 152 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
|
| 153 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
| 154 |
+
|
| 155 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
| 156 |
+
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 157 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
|
| 158 |
+
shape `(batch_size, hidden_size, height, width)`.
|
| 159 |
+
|
| 160 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
|
| 161 |
+
include the spatial dimensions.
|
| 162 |
+
"""
|
| 163 |
+
|
| 164 |
+
loss: Optional[torch.FloatTensor] = None
|
| 165 |
+
logits: torch.FloatTensor = None
|
| 166 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 167 |
+
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class FocalNetEmbeddings(nn.Module):
|
| 171 |
+
"""
|
| 172 |
+
Construct the patch embeddings and layernorm. Optionally, also the mask token.
|
| 173 |
+
"""
|
| 174 |
+
|
| 175 |
+
def __init__(self, config, use_mask_token=False):
|
| 176 |
+
super().__init__()
|
| 177 |
+
|
| 178 |
+
self.patch_embeddings = FocalNetPatchEmbeddings(
|
| 179 |
+
config=config,
|
| 180 |
+
image_size=config.image_size,
|
| 181 |
+
patch_size=config.patch_size,
|
| 182 |
+
num_channels=config.num_channels,
|
| 183 |
+
embed_dim=config.embed_dim,
|
| 184 |
+
use_conv_embed=config.use_conv_embed,
|
| 185 |
+
is_stem=True,
|
| 186 |
+
)
|
| 187 |
+
self.patch_grid = self.patch_embeddings.grid_size
|
| 188 |
+
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
|
| 189 |
+
|
| 190 |
+
self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
|
| 191 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 192 |
+
|
| 193 |
+
def forward(
|
| 194 |
+
self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
|
| 195 |
+
) -> Tuple[torch.Tensor]:
|
| 196 |
+
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
|
| 197 |
+
embeddings = self.norm(embeddings)
|
| 198 |
+
batch_size, seq_len, _ = embeddings.size()
|
| 199 |
+
|
| 200 |
+
if bool_masked_pos is not None:
|
| 201 |
+
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
|
| 202 |
+
# replace the masked visual tokens by mask_tokens
|
| 203 |
+
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
|
| 204 |
+
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
|
| 205 |
+
|
| 206 |
+
embeddings = self.dropout(embeddings)
|
| 207 |
+
return embeddings, output_dimensions
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class FocalNetPatchEmbeddings(nn.Module):
|
| 211 |
+
def __init__(
|
| 212 |
+
self,
|
| 213 |
+
config,
|
| 214 |
+
image_size,
|
| 215 |
+
patch_size,
|
| 216 |
+
num_channels,
|
| 217 |
+
embed_dim,
|
| 218 |
+
add_norm=False,
|
| 219 |
+
use_conv_embed=False,
|
| 220 |
+
is_stem=False,
|
| 221 |
+
):
|
| 222 |
+
super().__init__()
|
| 223 |
+
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
|
| 224 |
+
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
|
| 225 |
+
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
|
| 226 |
+
self.image_size = image_size
|
| 227 |
+
self.patch_size = patch_size
|
| 228 |
+
self.num_channels = num_channels
|
| 229 |
+
self.num_patches = num_patches
|
| 230 |
+
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
|
| 231 |
+
|
| 232 |
+
if use_conv_embed:
|
| 233 |
+
# if we choose to use conv embedding, then we treat the stem and non-stem differently
|
| 234 |
+
if is_stem:
|
| 235 |
+
kernel_size = 7
|
| 236 |
+
padding = 2
|
| 237 |
+
stride = 4
|
| 238 |
+
else:
|
| 239 |
+
kernel_size = 3
|
| 240 |
+
padding = 1
|
| 241 |
+
stride = 2
|
| 242 |
+
self.projection = nn.Conv2d(
|
| 243 |
+
num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
|
| 244 |
+
)
|
| 245 |
+
else:
|
| 246 |
+
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
|
| 247 |
+
|
| 248 |
+
if add_norm:
|
| 249 |
+
self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
|
| 250 |
+
else:
|
| 251 |
+
self.norm = None
|
| 252 |
+
|
| 253 |
+
def maybe_pad(self, pixel_values, height, width):
|
| 254 |
+
if width % self.patch_size[1] != 0:
|
| 255 |
+
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
|
| 256 |
+
pixel_values = nn.functional.pad(pixel_values, pad_values)
|
| 257 |
+
if height % self.patch_size[0] != 0:
|
| 258 |
+
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
|
| 259 |
+
pixel_values = nn.functional.pad(pixel_values, pad_values)
|
| 260 |
+
return pixel_values
|
| 261 |
+
|
| 262 |
+
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
|
| 263 |
+
_, num_channels, height, width = pixel_values.shape
|
| 264 |
+
if num_channels != self.num_channels:
|
| 265 |
+
raise ValueError(
|
| 266 |
+
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
|
| 267 |
+
)
|
| 268 |
+
# pad the input to be divisible by self.patch_size, if needed
|
| 269 |
+
pixel_values = self.maybe_pad(pixel_values, height, width)
|
| 270 |
+
embeddings = self.projection(pixel_values)
|
| 271 |
+
_, _, height, width = embeddings.shape
|
| 272 |
+
output_dimensions = (height, width)
|
| 273 |
+
embeddings = embeddings.flatten(2).transpose(1, 2)
|
| 274 |
+
|
| 275 |
+
if self.norm is not None:
|
| 276 |
+
embeddings = self.norm(embeddings)
|
| 277 |
+
|
| 278 |
+
return embeddings, output_dimensions
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
# Copied from transformers.models.beit.modeling_beit.drop_path
|
| 282 |
+
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
|
| 283 |
+
"""
|
| 284 |
+
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
| 285 |
+
|
| 286 |
+
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
|
| 287 |
+
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
|
| 288 |
+
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
|
| 289 |
+
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
|
| 290 |
+
argument.
|
| 291 |
+
"""
|
| 292 |
+
if drop_prob == 0.0 or not training:
|
| 293 |
+
return input
|
| 294 |
+
keep_prob = 1 - drop_prob
|
| 295 |
+
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
| 296 |
+
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
|
| 297 |
+
random_tensor.floor_() # binarize
|
| 298 |
+
output = input.div(keep_prob) * random_tensor
|
| 299 |
+
return output
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->FocalNet
|
| 303 |
+
class FocalNetDropPath(nn.Module):
|
| 304 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
|
| 305 |
+
|
| 306 |
+
def __init__(self, drop_prob: Optional[float] = None) -> None:
|
| 307 |
+
super().__init__()
|
| 308 |
+
self.drop_prob = drop_prob
|
| 309 |
+
|
| 310 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 311 |
+
return drop_path(hidden_states, self.drop_prob, self.training)
|
| 312 |
+
|
| 313 |
+
def extra_repr(self) -> str:
|
| 314 |
+
return "p={}".format(self.drop_prob)
|
| 315 |
+
|
| 316 |
+
|
class FocalNetModulation(nn.Module):
    def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0):
        super().__init__()

        self.dim = dim
        self.focal_window = config.focal_windows[index]
        self.focal_level = config.focal_levels[index]
        self.focal_factor = focal_factor
        self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation
        self.normalize_modulator = config.normalize_modulator

        self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias)
        self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias)

        self.activation = nn.GELU()
        self.projection_out = nn.Linear(dim, dim)
        self.projection_dropout = nn.Dropout(projection_dropout)
        self.focal_layers = nn.ModuleList()

        self.kernel_sizes = []
        for k in range(self.focal_level):
            kernel_size = self.focal_factor * k + self.focal_window
            self.focal_layers.append(
                nn.Sequential(
                    nn.Conv2d(
                        dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False
                    ),
                    nn.GELU(),
                )
            )
            self.kernel_sizes.append(kernel_size)
        if self.use_post_layernorm_in_modulation:
            self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps)

    def forward(self, hidden_state):
        """
        Args:
            hidden_state:
                Input features with shape of (batch_size, height, width, num_channels)
        """
        num_channels = hidden_state.shape[-1]

        # pre linear projection
        x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
        q, ctx, self.gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)

        # context aggregation
        ctx_all = 0
        for level in range(self.focal_level):
            ctx = self.focal_layers[level](ctx)
            ctx_all = ctx_all + ctx * self.gates[:, level : level + 1]
        ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
        ctx_all = ctx_all + ctx_global * self.gates[:, self.focal_level :]

        # normalize context
        if self.normalize_modulator:
            ctx_all = ctx_all / (self.focal_level + 1)

        # focal modulation
        self.modulator = self.projection_context(ctx_all)
        x_out = q * self.modulator
        x_out = x_out.permute(0, 2, 3, 1).contiguous()
        if self.use_post_layernorm_in_modulation:
            x_out = self.layernorm(x_out)

        # post linear projection
        x_out = self.projection_out(x_out)
        x_out = self.projection_dropout(x_out)
        return x_out

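# Shape walk-through for FocalNetModulation above (derived from the code): the input
# (batch_size, height, width, dim) is projected to 2 * dim + focal_level + 1 channels and
# permuted to channels-first, then split into q (dim channels), ctx (dim channels) and
# gates (focal_level + 1 channels). Each focal layer is a depthwise conv + GELU applied to
# ctx; the gated sum of the per-level contexts plus a gated global-average branch forms the
# modulator, which multiplies q before the output projection and dropout.
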
class FocalNetMlp(nn.Module):
    def __init__(self, config, in_features, hidden_features=None, out_features=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.activation = ACT2FN[config.hidden_act]
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, hidden_state):
        hidden_state = self.fc1(hidden_state)
        hidden_state = self.activation(hidden_state)
        hidden_state = self.drop(hidden_state)
        hidden_state = self.fc2(hidden_state)
        hidden_state = self.drop(hidden_state)
        return hidden_state

class FocalNetLayer(nn.Module):
    r"""Focal Modulation Network layer (block).

    Args:
        config (`FocalNetConfig`):
            Model config.
        index (`int`):
            Layer index.
        dim (`int`):
            Number of input channels.
        input_resolution (`Tuple[int]`):
            Input resolution.
        drop_path (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate.
    """

    def __init__(self, config, index, dim, input_resolution, drop_path=0.0):
        super().__init__()

        self.config = config

        # layer-specific attributes
        self.dim = dim
        self.input_resolution = input_resolution

        # general attributes
        self.drop = config.hidden_dropout_prob
        self.use_post_layernorm = config.use_post_layernorm

        self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.modulation = FocalNetModulation(
            config=config,
            index=index,
            dim=dim,
            projection_dropout=self.drop,
        )

        self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        mlp_hidden_dim = int(dim * config.mlp_ratio)
        self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop)

        self.gamma_1 = 1.0
        self.gamma_2 = 1.0
        if config.use_layerscale:
            self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True)

    def forward(self, hidden_state, input_dimensions):
        height, width = input_dimensions
        batch_size, _, num_channels = hidden_state.shape
        shortcut = hidden_state

        # Focal Modulation
        hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state)
        hidden_state = hidden_state.view(batch_size, height, width, num_channels)
        hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels)
        hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state)

        # FFN
        hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state)
        hidden_state = hidden_state + self.drop_path(
            self.gamma_2
            * (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state)))
        )

        return hidden_state

class FocalNetStage(nn.Module):
    def __init__(self, config, index, input_resolution):
        super().__init__()

        self.config = config
        self.num_stages = len(config.depths)

        embed_dim = [config.embed_dim * (2**i) for i in range(self.num_stages)]
        dim = embed_dim[index]
        out_dim = embed_dim[index + 1] if (index < self.num_stages - 1) else None
        downsample = FocalNetPatchEmbeddings if (index < self.num_stages - 1) else None

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        drop_path = dpr[sum(config.depths[:index]) : sum(config.depths[: index + 1])]

        self.layers = nn.ModuleList(
            [
                FocalNetLayer(
                    config=config,
                    index=index,
                    dim=dim,
                    input_resolution=input_resolution,
                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                )
                for i in range(config.depths[index])
            ]
        )

        if downsample is not None:
            self.downsample = downsample(
                config=config,
                image_size=input_resolution,
                patch_size=2,
                num_channels=dim,
                embed_dim=out_dim,
                add_norm=True,
                use_conv_embed=config.use_conv_embed,
                is_stem=False,
            )
        else:
            self.downsample = None

        self.pointing = False

    def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int]) -> Tuple[torch.Tensor]:
        height, width = input_dimensions
        for layer_module in self.layers:
            hidden_states = layer_module(hidden_states, input_dimensions)

        hidden_states_before_downsampling = hidden_states
        if self.downsample is not None:
            height, width = input_dimensions
            hidden_states = hidden_states.transpose(1, 2).reshape(
                hidden_states_before_downsampling.shape[0], -1, height, width
            )
            hidden_states, output_dimensions = self.downsample(hidden_states)

        else:
            output_dimensions = (height, width, height, width)

        stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)

        return stage_outputs

class FocalNetEncoder(nn.Module):
    def __init__(self, config, grid_size):
        super().__init__()
        self.num_stages = len(config.depths)
        self.config = config

        self.stages = nn.ModuleList(
            [
                FocalNetStage(
                    config=config,
                    index=i_layer,
                    input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
                )
                for i_layer in range(self.num_stages)
            ]
        )

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, FocalNetEncoderOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None

        if output_hidden_states:
            batch_size, _, hidden_size = hidden_states.shape
            # rearrange b (h w) c -> b c h w
            reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
            reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, stage_module in enumerate(self.stages):
            if self.gradient_checkpointing and self.training:
                stage_outputs = self._gradient_checkpointing_func(
                    stage_module.__call__,
                    hidden_states,
                    input_dimensions,
                )
            else:
                stage_outputs = stage_module(hidden_states, input_dimensions)

            hidden_states = stage_outputs[0]
            hidden_states_before_downsampling = stage_outputs[1]
            output_dimensions = stage_outputs[2]

            input_dimensions = (output_dimensions[-2], output_dimensions[-1])

            if output_hidden_states and output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states_before_downsampling.shape
                # rearrange b (h w) c -> b c h w
                # here we use the original (not downsampled) height and width
                reshaped_hidden_state = hidden_states_before_downsampling.view(
                    batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
                )
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states.shape
                # rearrange b (h w) c -> b c h w
                reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return FocalNetEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )

# Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->FocalNet,swin->focalnet
class FocalNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = FocalNetConfig
    base_model_prefix = "focalnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["FocalNetStage"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


FOCALNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`FocalNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

FOCALNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`AutoImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare FocalNet Model outputting raw hidden-states without any specific head on top.",
    FOCALNET_START_DOCSTRING,
)
class FocalNetModel(FocalNetPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
        super().__init__(config)
        self.config = config
        self.num_stages = len(config.depths)
        self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))

        self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid)

        self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=FocalNetModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FocalNetModelOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            input_dimensions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)

        pooled_output = None
        if self.pooler is not None:
            pooled_output = self.pooler(sequence_output.transpose(1, 2))
            pooled_output = torch.flatten(pooled_output, 1)

        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]

            return output

        return FocalNetModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )

@add_start_docstrings(
    """FocalNet Model with a decoder on top for masked image modeling.

    This follows the same implementation as in [SimMIM](https://arxiv.org/abs/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """,
    FOCALNET_START_DOCSTRING,
)
class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True)

        self.num_stages = len(config.depths)
        num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=FocalNetMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FocalNetMaskedImageModelingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, FocalNetConfig, FocalNetForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-base-simmim-window6-192")
        >>> config = FocalNetConfig()
        >>> model = FocalNetForMaskedImageModeling(config)

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.logits
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 192, 192]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.focalnet(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output.transpose(1, 2)
        batch_size, num_channels, sequence_length = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[2:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return FocalNetMaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )

@add_start_docstrings(
    """
    FocalNet Model with an image classification head on top (a linear layer on top of the pooled output) e.g. for
    ImageNet.
    """,
    FOCALNET_START_DOCSTRING,
)
class FocalNetForImageClassification(FocalNetPreTrainedModel):
    # Copied from transformers.models.swin.modeling_swin.SwinForImageClassification.__init__ with Swin->FocalNet, swin->focalnet
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.focalnet = FocalNetModel(config)

        # Classifier head
        self.classifier = (
            nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=FocalNetImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FocalNetImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.focalnet(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return FocalNetImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )

@add_start_docstrings(
    """
    FocalNet backbone, to be used with frameworks like X-Decoder.
    """,
    FOCALNET_START_DOCSTRING,
)
class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin):
    def __init__(self, config: FocalNetConfig):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embed_dim] + config.hidden_sizes
        self.focalnet = FocalNetModel(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
        >>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf")

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.reshaped_hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )


__all__ = [
    "FocalNetForImageClassification",
    "FocalNetForMaskedImageModeling",
    "FocalNetBackbone",
    "FocalNetModel",
    "FocalNetPreTrainedModel",
]
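For orientation, here is a minimal sketch of how the classes exported above are typically used end to end. The checkpoint name `microsoft/focalnet-tiny` is an assumption; any FocalNet image-classification checkpoint would work the same way.

```python
# Minimal usage sketch for FocalNetForImageClassification (checkpoint name is an assumption).
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_labels)

predicted_label = model.config.id2label[logits.argmax(-1).item()]
print(predicted_label)
```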
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__init__.py
ADDED
@@ -0,0 +1,83 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_instructblipvideo": [
        "InstructBlipVideoConfig",
        "InstructBlipVideoQFormerConfig",
        "InstructBlipVideoVisionConfig",
    ],
    "processing_instructblipvideo": ["InstructBlipVideoProcessor"],
}


try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_instructblipvideo"] = ["InstructBlipVideoImageProcessor"]


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblipvideo"] = [
        "InstructBlipVideoQFormerModel",
        "InstructBlipVideoPreTrainedModel",
        "InstructBlipVideoForConditionalGeneration",
        "InstructBlipVideoVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblipvideo import (
        InstructBlipVideoConfig,
        InstructBlipVideoQFormerConfig,
        InstructBlipVideoVisionConfig,
    )
    from .processing_instructblipvideo import InstructBlipVideoProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_instructblipvideo import InstructBlipVideoImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblipvideo import (
            InstructBlipVideoForConditionalGeneration,
            InstructBlipVideoPreTrainedModel,
            InstructBlipVideoQFormerModel,
            InstructBlipVideoVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
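Because this `__init__.py` registers a `_LazyModule`, the submodules listed in `_import_structure` are only imported when a name is first accessed. A small sketch of what that means for callers, assuming a transformers build recent enough to export these classes at the top level:

```python
# Imports resolve lazily: the heavy modeling module is only loaded when an attribute is touched.
import transformers

config_cls = transformers.InstructBlipVideoConfig        # triggers the configuration submodule import
processor_cls = transformers.InstructBlipVideoProcessor  # triggers the processing submodule import
print(config_cls.__module__)
print(processor_cls.__module__)
```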
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/configuration_instructblipvideo.py
ADDED
@@ -0,0 +1,342 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/instructblipvideo/modular_instructblipvideo.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_instructblipvideo.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig


logger = logging.get_logger(__name__)


class InstructBlipVideoVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InstructBlipVideoVisionModel`]. It is used to
    instantiate a InstructBlipVideo vision encoder according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the InstructBlipVideo
    [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1408):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 39):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries and values in the self-attention layers.

    Example:

    ```python
    >>> from transformers import InstructBlipVideoVisionConfig, InstructBlipVideoVisionModel

    >>> # Initializing a InstructBlipVideoVisionConfig with Salesforce/instruct-blip-flan-t5 style configuration
    >>> configuration = InstructBlipVideoVisionConfig()

    >>> # Initializing a InstructBlipVideoVisionModel (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
    >>> model = InstructBlipVideoVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "instructblipvideo_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

class InstructBlipVideoQFormerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InstructBlipVideoQFormerModel`]. It is used to
    instantiate a InstructBlipVideo Querying Transformer (Q-Former) model according to the specified arguments, defining the
    model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
    the InstructBlipVideo [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5)
    architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
    Read the documentation from [`PretrainedConfig`] for more information.

    Note that [`InstructBlipVideoQFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling the model.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Token id used for padding sequences.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        cross_attention_frequency (`int`, *optional*, defaults to 2):
            The frequency of adding cross-attention to the Transformer layers.
        encoder_hidden_size (`int`, *optional*, defaults to 1408):
            The hidden size of the hidden states for cross-attention.

    Examples:

    ```python
    >>> from transformers import InstructBlipVideoQFormerConfig, InstructBlipVideoQFormerModel

    >>> # Initializing a InstructBlipVideo Salesforce/instruct-blip-flan-t5 style configuration
    >>> configuration = InstructBlipVideoQFormerConfig()

    >>> # Initializing a model (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
    >>> model = InstructBlipVideoQFormerModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "instructblipvideo_qformer"
    base_config_key = "qformer_config"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

class InstructBlipVideoConfig(PretrainedConfig):
    r"""
    [`InstructBlipVideoConfig`] is the configuration class to store the configuration of a
    [`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate a Instructblipvideo model according to the specified
    arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the Instructblipvideo
    [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`].
        qformer_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`].
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize any [`PretrainedConfig`].
        num_query_tokens (`int`, *optional*, defaults to 32):
            The number of query tokens passed through the Transformer.

        video_token_index (`int`, *optional*):
            Token index of special video token.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import (
    ...     InstructBlipVideoVisionConfig,
    ...     InstructBlipVideoQFormerConfig,
    ...     OPTConfig,
    ...     InstructBlipVideoConfig,
    ...     InstructBlipVideoForConditionalGeneration,
    ... )

    >>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration
    >>> configuration = InstructBlipVideoConfig()

    >>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
    >>> model = InstructBlipVideoForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig

    >>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations
    >>> vision_config = InstructBlipVideoVisionConfig()
    >>> qformer_config = InstructBlipVideoQFormerConfig()
    >>> text_config = OPTConfig()

    >>> config = InstructBlipVideoConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
    ```"""

    model_type = "instructblipvideo"
    sub_configs = {
        "text_config": AutoConfig,
        "qformer_config": InstructBlipVideoQFormerConfig,
        "vision_config": InstructBlipVideoVisionConfig,
    }

    def __init__(
        self,
        vision_config=None,
        qformer_config=None,
        text_config=None,
        num_query_tokens=32,
        video_token_index=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVideoVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVideoVisionConfig(**vision_config)
        self.qformer_config = InstructBlipVideoQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.video_token_index = video_token_index
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVideoVisionConfig,
        qformer_config: InstructBlipVideoQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from a InstructBlipVideo vision model, Q-Former and
        language model configurations.

        Returns:
            [`InstructBlipVideoConfig`]: An instance of a configuration object
        """

        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )
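A quick illustration of how the three sub-configurations above compose, using only the classes and the `from_vision_qformer_text_configs` classmethod defined in this file (the OPT text config and the tweaked hyperparameters are arbitrary choices for the sketch):

```python
from transformers import (
    InstructBlipVideoConfig,
    InstructBlipVideoQFormerConfig,
    InstructBlipVideoVisionConfig,
    OPTConfig,
)

vision_config = InstructBlipVideoVisionConfig(image_size=224, patch_size=14)
qformer_config = InstructBlipVideoQFormerConfig(num_hidden_layers=6)
text_config = OPTConfig()

config = InstructBlipVideoConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)

# The composed config ties the Q-Former's cross-attention width to the vision encoder's hidden size.
assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
print(config.num_query_tokens, config.use_decoder_only_language_model)
```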
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/modeling_instructblipvideo.py
ADDED
@@ -0,0 +1,1670 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/instructblipvideo/modular_instructblipvideo.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_instructblipvideo.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPooling,
    BaseModelOutputWithPoolingAndCrossAttentions,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
    torch_int,
)
from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
from .configuration_instructblipvideo import (
    InstructBlipVideoConfig,
    InstructBlipVideoQFormerConfig,
    InstructBlipVideoVisionConfig,
)


logger = logging.get_logger(__name__)


@dataclass
class InstructBlipVideoForConditionalGenerationModelOutput(ModelOutput):
    """
    Class defining the outputs of [`InstructBlipVideoForConditionalGeneration`].

    Args:
        loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Language modeling loss from the language model.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head of the language model.
        vision_outputs (`BaseModelOutputWithPooling`):
            Outputs of the vision encoder.
        qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
            Outputs of the Q-Former (Querying Transformer).
        language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
            Outputs of the language model.
    """

    loss: Optional[Tuple[torch.FloatTensor]] = None
    logits: Optional[Tuple[torch.FloatTensor]] = None
    vision_outputs: Optional[torch.FloatTensor] = None
    qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
    language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k]
            if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
            else getattr(self, k).to_tuple()
            for k in self.keys()
        )


class InstructBlipVideoVisionEmbeddings(nn.Module):
    def __init__(self, config: InstructBlipVideoVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embedding.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding

        class_pos_embed = self.position_embedding[:, :1]
        patch_pos_embed = self.position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
        else:
            position_embedding = self.position_embedding
        embeddings = embeddings + position_embedding[:, : embeddings.size(1), :].to(target_dtype)
        return embeddings
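

# Added illustrative note (not part of the generated file): with the assumed defaults image_size=224
# and patch_size=14, the embeddings above hold 1 class token plus (224 // 14) ** 2 = 256 patch
# tokens, and interpolate_pos_encoding() bicubically resizes the 16 x 16 grid of patch position
# embeddings whenever a different input resolution is passed with interpolate_pos_encoding=True.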


class InstructBlipVideoAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = nn.Dropout(config.attention_dropout)

        # small tweak here compared to CLIP, no bias here
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)

        if config.qkv_bias:
            q_bias = nn.Parameter(torch.zeros(self.embed_dim))
            v_bias = nn.Parameter(torch.zeros(self.embed_dim))
        else:
            q_bias = None
            v_bias = None

        if q_bias is not None:
            qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
            self.qkv.bias = nn.Parameter(qkv_bias)

        self.projection = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        mixed_qkv = self.qkv(hidden_states)

        mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
            2, 0, 3, 1, 4
        )
        query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))

        attention_scores = attention_scores * self.scale

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)

        new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        output = self.projection(context_layer)

        outputs = (output, attention_probs) if output_attentions else (output, None)

        return outputs


class InstructBlipVideoMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InstructBlipVideoEncoderLayer(nn.Module):
    def __init__(self, config: InstructBlipVideoConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = InstructBlipVideoAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = InstructBlipVideoMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            head_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + residual
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)

        hidden_states = hidden_states + residual

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class InstructBlipVideoPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = InstructBlipVideoConfig
    base_model_prefix = "blip"
    supports_gradient_checkpointing = True

    _no_split_modules = [
        "InstructBlipVideoQFormerEmbeddings",
        "InstructBlipVideoAttention",
        "InstructBlipVideoQFormerMultiHeadAttention",
        "InstructBlipVideoQFormerSelfOutput",
    ]
    _keep_in_fp32_modules = []

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_range
        if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=factor)
            if hasattr(module, "bias") and module.bias is not None:
                module.bias.data.zero_()

        if isinstance(module, InstructBlipVideoVisionEmbeddings):
            if hasattr(self.config, "vision_config") and not isinstance(self.config, InstructBlipVideoVisionConfig):
                factor = self.config.vision_config.initializer_range
            nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
            nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)

        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class InstructBlipVideoEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InstructBlipVideoEncoderLayer`].

    Args:
        config (`InstructBlipVideoConfig`):
            The corresponding vision configuration for the `InstructBlipVideoEncoder`.
    """

    def __init__(self, config: InstructBlipVideoConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([InstructBlipVideoEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


INSTRUCTBLIPVIDEO_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`InstructBlipVideoProcessor`]. See
            [`InstructBlipVideoProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.
"""


class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel):
    main_input_name = "pixel_values"
    config_class = InstructBlipVideoVisionConfig

    def __init__(self, config: InstructBlipVideoVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = InstructBlipVideoVisionEmbeddings(config)
        self.encoder = InstructBlipVideoEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        self.post_init()

    @add_start_docstrings_to_model_forward(INSTRUCTBLIPVIDEO_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=InstructBlipVideoVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def get_input_embeddings(self):
        return self.embeddings
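

# Added illustrative sketch (not part of the generated file): the vision tower above can be run on
# its own. A minimal sketch, assuming a default InstructBlipVideoVisionConfig and random inputs:
#
#   config = InstructBlipVideoVisionConfig()
#   model = InstructBlipVideoVisionModel(config)
#   pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
#   outputs = model(pixel_values=pixel_values)
#   # outputs.last_hidden_state has shape (1, num_patches + 1, hidden_size);
#   # outputs.pooler_output has shape (1, hidden_size).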


class InstructBlipVideoQFormerMultiHeadAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
                % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
            self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        mixed_query_layer = self.query(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)

        past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_scores_dtype = attention_scores.dtype

        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs


class InstructBlipVideoQFormerSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class InstructBlipVideoQFormerAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.attention = InstructBlipVideoQFormerMultiHeadAttention(config, is_cross_attention)
        self.output = InstructBlipVideoQFormerSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class InstructBlipVideoQFormerIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class InstructBlipVideoQFormerOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class InstructBlipVideoQFormerLayer(nn.Module):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = InstructBlipVideoQFormerAttention(config)

        self.layer_idx = layer_idx

        if layer_idx % config.cross_attention_frequency == 0:
            self.crossattention = InstructBlipVideoQFormerAttention(config, is_cross_attention=True)
            self.has_cross_attention = True
        else:
            self.has_cross_attention = False

        self.intermediate = InstructBlipVideoQFormerIntermediate(config)
        self.output = InstructBlipVideoQFormerOutput(config)

        self.intermediate_query = InstructBlipVideoQFormerIntermediate(config)
        self.output_query = InstructBlipVideoQFormerOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        query_length=0,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:-1]

        present_key_value = self_attention_outputs[-1]

        if query_length > 0:
            query_attention_output = attention_output[:, :query_length, :]

            if self.has_cross_attention:
                if encoder_hidden_states is None:
                    raise ValueError("encoder_hidden_states must be given for cross-attention layers")
                cross_attention_outputs = self.crossattention(
                    query_attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions=output_attentions,
                )
                query_attention_output = cross_attention_outputs[0]
                # add cross attentions if we output attention weights
                outputs = outputs + cross_attention_outputs[1:-1]

            layer_output = apply_chunking_to_forward(
                self.feed_forward_chunk_query,
                self.chunk_size_feed_forward,
                self.seq_len_dim,
                query_attention_output,
            )

            if attention_output.shape[1] > query_length:
                layer_output_text = apply_chunking_to_forward(
                    self.feed_forward_chunk,
                    self.chunk_size_feed_forward,
                    self.seq_len_dim,
                    attention_output[:, query_length:, :],
                )
                layer_output = torch.cat([layer_output, layer_output_text], dim=1)
        else:
            layer_output = apply_chunking_to_forward(
                self.feed_forward_chunk,
                self.chunk_size_feed_forward,
                self.seq_len_dim,
                attention_output,
            )
        outputs = (layer_output,) + outputs

        outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

    def feed_forward_chunk_query(self, attention_output):
        intermediate_output = self.intermediate_query(attention_output)
        layer_output = self.output_query(intermediate_output, attention_output)
        return layer_output


class InstructBlipVideoQFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList(
            [InstructBlipVideoQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        query_length=0,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions else None

        next_decoder_cache = () if use_cache else None

        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    query_length,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if layer_module.has_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class InstructBlipVideoQFormerEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self,
        input_ids=None,
        position_ids=None,
        query_embeds=None,
        past_key_values_length=0,
    ):
        if input_ids is not None:
            seq_length = input_ids.size()[1]
        else:
            seq_length = 0

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone()

        if input_ids is not None:
            embeddings = self.word_embeddings(input_ids)
            if self.position_embedding_type == "absolute":
                position_embeddings = self.position_embeddings(position_ids.to(embeddings.device))
                embeddings = embeddings + position_embeddings

            if query_embeds is not None:
                embeddings = torch.cat((query_embeds, embeddings), dim=1)
        else:
            embeddings = query_embeds

        embeddings = embeddings.to(self.layernorm.weight.dtype)
        embeddings = self.layernorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class InstructBlipVideoQFormerModel(InstructBlipVideoPreTrainedModel):
|
| 995 |
+
"""
|
| 996 |
+
Querying Transformer (Q-Former), used in InstructBlipVideo. Slightly modified from BLIP-2 as it also takes the
|
| 997 |
+
instruction as input.
|
| 998 |
+
"""
|
| 999 |
+
|
| 1000 |
+
def __init__(self, config: InstructBlipVideoQFormerConfig):
|
| 1001 |
+
super().__init__(config)
|
| 1002 |
+
self.config = config
|
| 1003 |
+
|
| 1004 |
+
self.embeddings = InstructBlipVideoQFormerEmbeddings(config)
|
| 1005 |
+
|
| 1006 |
+
self.encoder = InstructBlipVideoQFormerEncoder(config)
|
| 1007 |
+
|
| 1008 |
+
self.post_init()
|
| 1009 |
+
|
| 1010 |
+
def get_input_embeddings(self):
|
| 1011 |
+
return self.embeddings.word_embeddings
|
| 1012 |
+
|
| 1013 |
+
def set_input_embeddings(self, value):
|
| 1014 |
+
self.embeddings.word_embeddings = value
|
| 1015 |
+
|
| 1016 |
+
def _prune_heads(self, heads_to_prune):
|
| 1017 |
+
"""
|
| 1018 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
| 1019 |
+
class PreTrainedModel
|
| 1020 |
+
"""
|
| 1021 |
+
for layer, heads in heads_to_prune.items():
|
| 1022 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
| 1023 |
+
|
| 1024 |
+
def get_extended_attention_mask(
|
| 1025 |
+
self,
|
| 1026 |
+
attention_mask: torch.Tensor,
|
| 1027 |
+
input_shape: Tuple[int],
|
| 1028 |
+
device: torch.device,
|
| 1029 |
+
has_query: bool = False,
|
| 1030 |
+
) -> torch.Tensor:
|
| 1031 |
+
"""
|
| 1032 |
+
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
|
| 1033 |
+
|
| 1034 |
+
Arguments:
|
| 1035 |
+
attention_mask (`torch.Tensor`):
|
| 1036 |
+
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
|
| 1037 |
+
input_shape (`Tuple[int]`):
|
| 1038 |
+
The shape of the input to the model.
|
| 1039 |
+
device: (`torch.device`):
|
| 1040 |
+
The device of the input to the model.
|
| 1041 |
+
|
| 1042 |
+
Returns:
|
| 1043 |
+
`torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.
|
| 1044 |
+
"""
|
| 1045 |
+
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
| 1046 |
+
# ourselves in which case we just need to make it broadcastable to all heads.
|
| 1047 |
+
if attention_mask.dim() == 3:
|
| 1048 |
+
extended_attention_mask = attention_mask[:, None, :, :]
|
| 1049 |
+
elif attention_mask.dim() == 2:
|
| 1050 |
+
# Provided a padding mask of dimensions [batch_size, seq_length]
|
| 1051 |
+
# - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
| 1052 |
+
extended_attention_mask = attention_mask[:, None, None, :]
|
| 1053 |
+
else:
|
| 1054 |
+
raise ValueError(
|
| 1055 |
+
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})",
|
| 1056 |
+
)
|
| 1057 |
+
|
| 1058 |
+
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
| 1059 |
+
# masked positions, this operation will create a tensor which is 0.0 for
|
| 1060 |
+
# positions we want to attend and -10000.0 for masked positions.
|
| 1061 |
+
# Since we are adding it to the raw scores before the softmax, this is
|
| 1062 |
+
# effectively the same as removing these entirely.
|
| 1063 |
+
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
|
| 1064 |
+
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
| 1065 |
+
return extended_attention_mask
|
| 1066 |
+
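# Illustrative note (not from the original source): for a padding mask such as
# attention_mask = torch.tensor([[1, 1, 0]]), the 2-D branch above produces a
# broadcastable mask of shape (1, 1, 1, 3); after (1.0 - mask) * -10000.0 the
# masked position carries -10000.0, so it is effectively removed from the softmax.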
|
| 1067 |
+
def forward(
|
| 1068 |
+
self,
|
| 1069 |
+
input_ids: torch.LongTensor,
|
| 1070 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 1071 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1072 |
+
query_embeds: Optional[torch.Tensor] = None,
|
| 1073 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
| 1074 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
| 1075 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
| 1076 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
| 1077 |
+
use_cache: Optional[bool] = None,
|
| 1078 |
+
output_attentions: Optional[bool] = None,
|
| 1079 |
+
output_hidden_states: Optional[bool] = None,
|
| 1080 |
+
return_dict: Optional[bool] = None,
|
| 1081 |
+
) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
|
| 1082 |
+
r"""
|
| 1083 |
+
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 1084 |
+
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
| 1085 |
+
the model is configured as a decoder.
|
| 1086 |
+
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1087 |
+
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
| 1088 |
+
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
|
| 1089 |
+
- 1 for tokens that are **not masked**,
|
| 1090 |
+
- 0 for tokens that are **masked**.
|
| 1091 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
|
| 1092 |
+
shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
|
| 1093 |
+
value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
|
| 1094 |
+
used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
|
| 1095 |
+
value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
|
| 1096 |
+
`(batch_size, sequence_length)`.
|
| 1097 |
+
use_cache (`bool`, *optional*):
|
| 1098 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 1099 |
+
`past_key_values`).
|
| 1100 |
+
"""
|
| 1101 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1102 |
+
output_hidden_states = (
|
| 1103 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1104 |
+
)
|
| 1105 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1106 |
+
|
| 1107 |
+
if input_ids is None and query_embeds is None:
|
| 1108 |
+
raise ValueError("You have to specify query_embeds when input_ids is None")
|
| 1109 |
+
|
| 1110 |
+
# past_key_values_length
|
| 1111 |
+
past_key_values_length = (
|
| 1112 |
+
past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
|
| 1113 |
+
)
|
| 1114 |
+
|
| 1115 |
+
query_length = query_embeds.shape[1] if query_embeds is not None else 0
|
| 1116 |
+
|
| 1117 |
+
embedding_output = self.embeddings(
|
| 1118 |
+
input_ids=input_ids,
|
| 1119 |
+
position_ids=position_ids,
|
| 1120 |
+
query_embeds=query_embeds,
|
| 1121 |
+
past_key_values_length=past_key_values_length,
|
| 1122 |
+
)
|
| 1123 |
+
|
| 1124 |
+
input_shape = embedding_output.size()[:-1]
|
| 1125 |
+
batch_size, seq_length = input_shape
|
| 1126 |
+
device = embedding_output.device
|
| 1127 |
+
|
| 1128 |
+
if attention_mask is None:
|
| 1129 |
+
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
|
| 1130 |
+
|
| 1131 |
+
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
| 1132 |
+
# ourselves in which case we just need to make it broadcastable to all heads.
|
| 1133 |
+
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
|
| 1134 |
+
|
| 1135 |
+
# If a 2D or 3D attention mask is provided for the cross-attention
|
| 1136 |
+
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
| 1137 |
+
if encoder_hidden_states is not None:
|
| 1138 |
+
if isinstance(encoder_hidden_states, list):
|
| 1139 |
+
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
|
| 1140 |
+
else:
|
| 1141 |
+
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
|
| 1142 |
+
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
| 1143 |
+
|
| 1144 |
+
if isinstance(encoder_attention_mask, list):
|
| 1145 |
+
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
|
| 1146 |
+
elif encoder_attention_mask is None:
|
| 1147 |
+
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
| 1148 |
+
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
| 1149 |
+
else:
|
| 1150 |
+
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
| 1151 |
+
else:
|
| 1152 |
+
encoder_extended_attention_mask = None
|
| 1153 |
+
|
| 1154 |
+
# Prepare head mask if needed
|
| 1155 |
+
# 1.0 in head_mask indicate we keep the head
|
| 1156 |
+
# attention_probs has shape bsz x n_heads x N x N
|
| 1157 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
| 1158 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
| 1159 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
| 1160 |
+
|
| 1161 |
+
encoder_outputs = self.encoder(
|
| 1162 |
+
embedding_output,
|
| 1163 |
+
attention_mask=extended_attention_mask,
|
| 1164 |
+
head_mask=head_mask,
|
| 1165 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 1166 |
+
encoder_attention_mask=encoder_extended_attention_mask,
|
| 1167 |
+
past_key_values=past_key_values,
|
| 1168 |
+
use_cache=use_cache,
|
| 1169 |
+
output_attentions=output_attentions,
|
| 1170 |
+
output_hidden_states=output_hidden_states,
|
| 1171 |
+
return_dict=return_dict,
|
| 1172 |
+
query_length=query_length,
|
| 1173 |
+
)
|
| 1174 |
+
sequence_output = encoder_outputs[0]
|
| 1175 |
+
pooled_output = sequence_output[:, 0, :]
|
| 1176 |
+
|
| 1177 |
+
if not return_dict:
|
| 1178 |
+
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
| 1179 |
+
|
| 1180 |
+
return BaseModelOutputWithPoolingAndCrossAttentions(
|
| 1181 |
+
last_hidden_state=sequence_output,
|
| 1182 |
+
pooler_output=pooled_output,
|
| 1183 |
+
past_key_values=encoder_outputs.past_key_values,
|
| 1184 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 1185 |
+
attentions=encoder_outputs.attentions,
|
| 1186 |
+
cross_attentions=encoder_outputs.cross_attentions,
|
| 1187 |
+
)
|
| 1188 |
+
|
| 1189 |
+
|
| 1190 |
+
INSTRUCTBLIPVIDEO_START_DOCSTRING = r"""
|
| 1191 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 1192 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 1193 |
+
etc.)
|
| 1194 |
+
|
| 1195 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 1196 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 1197 |
+
and behavior.
|
| 1198 |
+
|
| 1199 |
+
Parameters:
|
| 1200 |
+
config ([`InstructBlipVideoConfig`]): Model configuration class with all the parameters of the model.
|
| 1201 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 1202 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 1203 |
+
"""
|
| 1204 |
+
|
| 1205 |
+
INSTRUCTBLIPVIDEO_INPUTS_DOCSTRING = r"""
|
| 1206 |
+
Args:
|
| 1207 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 1208 |
+
Pixel values. Pixel values can be obtained using [`InstructBlipVideoProcessor`]. See
|
| 1209 |
+
[`InstructBlipVideoProcessor.__call__`] for details.
|
| 1210 |
+
|
| 1211 |
+
qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1212 |
+
Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
|
| 1213 |
+
to serve as text prompt, which the Q-Former model will encode.
|
| 1214 |
+
|
| 1215 |
+
Indices can be obtained using [`InstructBlipVideoProcessor`]. See [`InstructBlipVideoProcessor.__call__`] for
|
| 1216 |
+
details.
|
| 1217 |
+
|
| 1218 |
+
[What are input IDs?](../glossary#input-ids)
|
| 1219 |
+
|
| 1220 |
+
qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1221 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 1222 |
+
|
| 1223 |
+
- 1 for tokens that are **not masked**,
|
| 1224 |
+
- 0 for tokens that are **masked**.
|
| 1225 |
+
|
| 1226 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 1227 |
+
|
| 1228 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1229 |
+
Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
|
| 1230 |
+
provided to serve as text prompt, which the language model can continue.
|
| 1231 |
+
|
| 1232 |
+
Indices can be obtained using [`InstructBlipVideoProcessor`]. See [`InstructBlipVideoProcessor.__call__`] for
|
| 1233 |
+
details.
|
| 1234 |
+
|
| 1235 |
+
[What are input IDs?](../glossary#input-ids)
|
| 1236 |
+
|
| 1237 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1238 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 1239 |
+
|
| 1240 |
+
- 1 for tokens that are **not masked**,
|
| 1241 |
+
- 0 for tokens that are **masked**.
|
| 1242 |
+
|
| 1243 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 1244 |
+
|
| 1245 |
+
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
|
| 1246 |
+
Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an
|
| 1247 |
+
encoder-decoder language model (like T5) is used.
|
| 1248 |
+
|
| 1249 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 1250 |
+
[`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)
|
| 1251 |
+
|
| 1252 |
+
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
|
| 1253 |
+
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
|
| 1254 |
+
be used by default.
|
| 1255 |
+
|
| 1256 |
+
Only relevant in case an encoder-decoder language model (like T5) is used.
|
| 1257 |
+
|
| 1258 |
+
output_attentions (`bool`, *optional*):
|
| 1259 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 1260 |
+
tensors for more detail.
|
| 1261 |
+
output_hidden_states (`bool`, *optional*):
|
| 1262 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 1263 |
+
more detail.
|
| 1264 |
+
return_dict (`bool`, *optional*):
|
| 1265 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 1266 |
+
interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
|
| 1267 |
+
Whether to interpolate the pre-trained position encodings.
|
| 1268 |
+
"""
|
| 1269 |
+
|
| 1270 |
+
|
| 1271 |
+
@add_start_docstrings(
|
| 1272 |
+
"""
|
| 1273 |
+
InstructBlipVideo Model for generating text given an image and an optional text prompt. The model consists of a vision
|
| 1274 |
+
encoder, Querying Transformer (Q-Former) and a language model.
|
| 1275 |
+
|
| 1276 |
+
One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
|
| 1277 |
+
the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
|
| 1278 |
+
""",
|
| 1279 |
+
INSTRUCTBLIPVIDEO_START_DOCSTRING,
|
| 1280 |
+
)
|
| 1281 |
+
class InstructBlipVideoForConditionalGeneration(InstructBlipVideoPreTrainedModel, GenerationMixin):
|
| 1282 |
+
config_class = InstructBlipVideoConfig
|
| 1283 |
+
main_input_name = "pixel_values"
|
| 1284 |
+
|
| 1285 |
+
def __init__(self, config: InstructBlipVideoConfig):
|
| 1286 |
+
super().__init__(config)
|
| 1287 |
+
|
| 1288 |
+
self.vision_model = InstructBlipVideoVisionModel(config.vision_config)
|
| 1289 |
+
|
| 1290 |
+
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
|
| 1291 |
+
self.qformer = InstructBlipVideoQFormerModel(config.qformer_config)
|
| 1292 |
+
|
| 1293 |
+
self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
|
| 1294 |
+
|
| 1295 |
+
if config.use_decoder_only_language_model:
|
| 1296 |
+
language_model = AutoModelForCausalLM.from_config(config.text_config)
|
| 1297 |
+
else:
|
| 1298 |
+
language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
|
| 1299 |
+
|
| 1300 |
+
if language_model._no_split_modules is not None:
|
| 1301 |
+
self._no_split_modules.extend(language_model._no_split_modules)
|
| 1302 |
+
|
| 1303 |
+
if language_model._keep_in_fp32_modules is not None:
|
| 1304 |
+
self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules)
|
| 1305 |
+
|
| 1306 |
+
self.language_model = language_model
|
| 1307 |
+
|
| 1308 |
+
# Initialize weights and apply final processing
|
| 1309 |
+
self.post_init()
|
| 1310 |
+
|
| 1311 |
+
def get_input_embeddings(self):
|
| 1312 |
+
return self.language_model.get_input_embeddings()
|
| 1313 |
+
|
| 1314 |
+
def set_input_embeddings(self, value):
|
| 1315 |
+
self.language_model.set_input_embeddings(value)
|
| 1316 |
+
|
| 1317 |
+
def set_output_embeddings(self, new_embeddings):
|
| 1318 |
+
self.language_model.set_output_embeddings(new_embeddings)
|
| 1319 |
+
|
| 1320 |
+
def get_output_embeddings(self) -> nn.Module:
|
| 1321 |
+
return self.language_model.get_output_embeddings()
|
| 1322 |
+
|
| 1323 |
+
def get_encoder(self):
|
| 1324 |
+
return self.language_model.get_encoder()
|
| 1325 |
+
|
| 1326 |
+
def get_decoder(self):
|
| 1327 |
+
return self.language_model.get_decoder()
|
| 1328 |
+
|
| 1329 |
+
def _tie_weights(self):
|
| 1330 |
+
if not self.config.use_decoder_only_language_model:
|
| 1331 |
+
self.language_model.encoder.embed_tokens = self.language_model.shared
|
| 1332 |
+
self.language_model.decoder.embed_tokens = self.language_model.shared
|
| 1333 |
+
|
| 1334 |
+
def _preprocess_accelerate(self):
|
| 1335 |
+
r"""
|
| 1336 |
+
Some pre-processing hacks to make the model `accelerate` compatible. Check
|
| 1337 |
+
https://github.com/huggingface/transformers/pull/21707 for more details.
|
| 1338 |
+
"""
|
| 1339 |
+
hf_device_map = self.hf_device_map
|
| 1340 |
+
|
| 1341 |
+
if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
|
| 1342 |
+
# warn users about unexpected behavior when using multi-GPU + InstructBlipVideo + `accelerate`.
|
| 1343 |
+
logger.warning(
|
| 1344 |
+
"The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
|
| 1345 |
+
" in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
|
| 1346 |
+
" Please pass a `device_map` that contains `language_model` to remove this warning."
|
| 1347 |
+
" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
|
| 1348 |
+
" more details on creating a `device_map` for large models.",
|
| 1349 |
+
)
|
| 1350 |
+
|
| 1351 |
+
if hasattr(self.language_model, "_hf_hook"):
|
| 1352 |
+
self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
|
| 1353 |
+
|
| 1354 |
+
@add_start_docstrings_to_model_forward(INSTRUCTBLIPVIDEO_INPUTS_DOCSTRING)
|
| 1355 |
+
@replace_return_docstrings(
|
| 1356 |
+
output_type=InstructBlipVideoForConditionalGenerationModelOutput, config_class=InstructBlipVideoVisionConfig
|
| 1357 |
+
)
|
| 1358 |
+
def forward(
|
| 1359 |
+
self,
|
| 1360 |
+
pixel_values: torch.FloatTensor,
|
| 1361 |
+
qformer_input_ids: torch.FloatTensor,
|
| 1362 |
+
qformer_attention_mask: Optional[torch.LongTensor] = None,
|
| 1363 |
+
input_ids: Optional[torch.FloatTensor] = None,
|
| 1364 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 1365 |
+
decoder_input_ids: Optional[torch.LongTensor] = None,
|
| 1366 |
+
decoder_attention_mask: Optional[torch.LongTensor] = None,
|
| 1367 |
+
output_attentions: Optional[bool] = None,
|
| 1368 |
+
output_hidden_states: Optional[bool] = None,
|
| 1369 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1370 |
+
return_dict: Optional[bool] = None,
|
| 1371 |
+
interpolate_pos_encoding: bool = False,
|
| 1372 |
+
) -> Union[Tuple, InstructBlipVideoForConditionalGenerationModelOutput]:
|
| 1373 |
+
r"""
|
| 1374 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1375 |
+
Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size -
|
| 1376 |
+
1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
|
| 1377 |
+
config.vocab_size]`
|
| 1378 |
+
|
| 1379 |
+
Returns:
|
| 1380 |
+
|
| 1381 |
+
Examples:
|
| 1382 |
+
|
| 1383 |
+
```python
|
| 1384 |
+
>>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration
|
| 1385 |
+
>>> import torch
|
| 1386 |
+
>>> from huggingface_hub import hf_hub_download
|
| 1387 |
+
>>> import av
|
| 1388 |
+
>>> import numpy as np
|
| 1389 |
+
|
| 1390 |
+
>>> def read_video_pyav(container, indices):
|
| 1391 |
+
... '''
|
| 1392 |
+
... Decode the video with PyAV decoder.
|
| 1393 |
+
... Args:
|
| 1394 |
+
... container (`av.container.input.InputContainer`): PyAV container.
|
| 1395 |
+
... indices (`List[int]`): List of frame indices to decode.
|
| 1396 |
+
... Returns:
|
| 1397 |
+
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
|
| 1398 |
+
... '''
|
| 1399 |
+
... frames = []
|
| 1400 |
+
... container.seek(0)
|
| 1401 |
+
... start_index = indices[0]
|
| 1402 |
+
... end_index = indices[-1]
|
| 1403 |
+
... for i, frame in enumerate(container.decode(video=0)):
|
| 1404 |
+
... if i > end_index:
|
| 1405 |
+
... break
|
| 1406 |
+
... if i >= start_index and i in indices:
|
| 1407 |
+
... frames.append(frame)
|
| 1408 |
+
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
|
| 1409 |
+
|
| 1410 |
+
>>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto")
|
| 1411 |
+
>>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
|
| 1412 |
+
|
| 1413 |
+
>>> file_path = hf_hub_download(
|
| 1414 |
+
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
|
| 1415 |
+
... )
|
| 1416 |
+
>>> container = av.open(file_path)
|
| 1417 |
+
|
| 1418 |
+
>>> # sample uniformly 4 frames from the video
|
| 1419 |
+
>>> total_frames = container.streams.video[0].frames
|
| 1420 |
+
>>> indices = np.arange(0, total_frames, total_frames / 4).astype(int)
|
| 1421 |
+
>>> clip = read_video_pyav(container, indices)
|
| 1422 |
+
|
| 1423 |
+
>>> prompt = "What is happening in the video?"
|
| 1424 |
+
>>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device)
|
| 1425 |
+
|
| 1426 |
+
>>> outputs = model.generate(
|
| 1427 |
+
... **inputs,
|
| 1428 |
+
... do_sample=False,
|
| 1429 |
+
... num_beams=5,
|
| 1430 |
+
... max_length=256,
|
| 1431 |
+
... repetition_penalty=1.5,
|
| 1432 |
+
... length_penalty=1.0,
|
| 1433 |
+
... )
|
| 1434 |
+
>>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
|
| 1435 |
+
>>> print(generated_text)
|
| 1436 |
+
"A person is eating a bowl of pasta, and they are using a fork to eat it. The person is sitting at a table, and the plate of pasta is on the table in front"
|
| 1437 |
+
```"""
|
| 1438 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1439 |
+
|
| 1440 |
+
# step 1: forward the images through the vision encoder,
|
| 1441 |
+
# we process in a batched way, later unbatch it back (video has frames=4 always)
|
| 1442 |
+
batch_size, frames, channel, height, width = pixel_values.shape
|
| 1443 |
+
pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)
|
| 1444 |
+
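# Illustrative shape check (assumed values, not from the original source): with a
# batch of 2 videos of 4 frames at 224x224, pixel_values goes from
# (2, 4, 3, 224, 224) to (8, 3, 224, 224), so the vision encoder treats each frame
# as an independent image; the frame dimension is folded back in step 3 below.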
|
| 1445 |
+
vision_outputs = self.vision_model(
|
| 1446 |
+
pixel_values=pixel_values,
|
| 1447 |
+
output_attentions=output_attentions,
|
| 1448 |
+
output_hidden_states=output_hidden_states,
|
| 1449 |
+
return_dict=return_dict,
|
| 1450 |
+
interpolate_pos_encoding=interpolate_pos_encoding,
|
| 1451 |
+
)
|
| 1452 |
+
image_embeds = vision_outputs[0]
|
| 1453 |
+
|
| 1454 |
+
# step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
|
| 1455 |
+
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
|
| 1456 |
+
|
| 1457 |
+
# difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former
|
| 1458 |
+
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
|
| 1459 |
+
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
|
| 1460 |
+
|
| 1461 |
+
if qformer_attention_mask is None:
|
| 1462 |
+
qformer_attention_mask = torch.ones_like(qformer_input_ids)
|
| 1463 |
+
|
| 1464 |
+
qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
|
| 1465 |
+
qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
|
| 1466 |
+
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
|
| 1467 |
+
query_outputs = self.qformer(
|
| 1468 |
+
input_ids=qformer_input_ids,
|
| 1469 |
+
attention_mask=qformer_attention_mask,
|
| 1470 |
+
query_embeds=query_tokens,
|
| 1471 |
+
encoder_hidden_states=image_embeds,
|
| 1472 |
+
encoder_attention_mask=image_attention_mask,
|
| 1473 |
+
output_attentions=output_attentions,
|
| 1474 |
+
output_hidden_states=output_hidden_states,
|
| 1475 |
+
return_dict=return_dict,
|
| 1476 |
+
)
|
| 1477 |
+
query_output = query_outputs[0][:, : query_tokens.size(1), :]
|
| 1478 |
+
|
| 1479 |
+
# step 3: use the language model, conditioned on the query outputs and the prompt
|
| 1480 |
+
language_model_inputs = self.language_projection(query_output)
|
| 1481 |
+
|
| 1482 |
+
# unbatch inputs back, each video-frame gets `num_query_tokens` seq length
|
| 1483 |
+
language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)
|
| 1484 |
+
language_model_attention_mask = torch.ones(
|
| 1485 |
+
language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
|
| 1486 |
+
)
|
| 1487 |
+
|
| 1488 |
+
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
|
| 1489 |
+
if attention_mask is None:
|
| 1490 |
+
attention_mask = torch.ones_like(input_ids)
|
| 1491 |
+
|
| 1492 |
+
# if the model already has "video_token_index" then the input is expanded to account for image embeds
|
| 1493 |
+
# otherwise we expand manually by concatenating
|
| 1494 |
+
if getattr(self.config, "video_token_index", None) is not None:
|
| 1495 |
+
special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds)
|
| 1496 |
+
inputs_embeds[special_image_mask] = language_model_inputs.flatten()
|
| 1497 |
+
else:
|
| 1498 |
+
logger.warning_once(
|
| 1499 |
+
"Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. "
|
| 1500 |
+
"Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. "
|
| 1501 |
+
"Using processors without these attributes in the config is deprecated and will throw an error in v4.47."
|
| 1502 |
+
)
|
| 1503 |
+
inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
|
| 1504 |
+
attention_mask = torch.cat(
|
| 1505 |
+
[language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1
|
| 1506 |
+
)
|
| 1507 |
+
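# Illustrative note (assumed values): with num_query_tokens=32 and 4 frames, the
# projected query embeddings span 32 * 4 = 128 positions. When `video_token_index`
# is set, the processor is expected to have inserted 128 video placeholder tokens
# into `input_ids`, and the mask above overwrites those embedding slots in place;
# otherwise the 128 embeddings are prepended and the attention mask is extended.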
|
| 1508 |
+
if self.config.use_decoder_only_language_model:
|
| 1509 |
+
outputs = self.language_model(
|
| 1510 |
+
inputs_embeds=inputs_embeds,
|
| 1511 |
+
attention_mask=attention_mask,
|
| 1512 |
+
output_attentions=output_attentions,
|
| 1513 |
+
output_hidden_states=output_hidden_states,
|
| 1514 |
+
return_dict=return_dict,
|
| 1515 |
+
)
|
| 1516 |
+
logits = outputs.logits if return_dict else outputs[0]
|
| 1517 |
+
loss = None
|
| 1518 |
+
# we compute the loss here since we need to take into account the sequence length of the query embeds
|
| 1519 |
+
if labels is not None:
|
| 1520 |
+
labels = labels.to(logits.device)
|
| 1521 |
+
logits = logits[:, -labels.size(1) :, :]
|
| 1522 |
+
# Shift so that tokens < n predict n
|
| 1523 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 1524 |
+
shift_labels = labels[..., 1:].contiguous().to(logits.device)
|
| 1525 |
+
|
| 1526 |
+
# Flatten the tokens
|
| 1527 |
+
loss_fct = CrossEntropyLoss(reduction="mean")
|
| 1528 |
+
|
| 1529 |
+
loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
|
| 1530 |
+
else:
|
| 1531 |
+
outputs = self.language_model(
|
| 1532 |
+
inputs_embeds=inputs_embeds,
|
| 1533 |
+
attention_mask=attention_mask,
|
| 1534 |
+
decoder_input_ids=decoder_input_ids,
|
| 1535 |
+
decoder_attention_mask=decoder_attention_mask,
|
| 1536 |
+
output_attentions=output_attentions,
|
| 1537 |
+
output_hidden_states=output_hidden_states,
|
| 1538 |
+
return_dict=return_dict,
|
| 1539 |
+
labels=labels,
|
| 1540 |
+
)
|
| 1541 |
+
loss = outputs.loss if return_dict else outputs[0]
|
| 1542 |
+
logits = outputs.logits if return_dict else outputs[1]
|
| 1543 |
+
|
| 1544 |
+
if not return_dict:
|
| 1545 |
+
output = (logits, vision_outputs, query_outputs, outputs)
|
| 1546 |
+
return ((loss,) + output) if loss is not None else output
|
| 1547 |
+
|
| 1548 |
+
return InstructBlipVideoForConditionalGenerationModelOutput(
|
| 1549 |
+
loss=loss,
|
| 1550 |
+
logits=logits,
|
| 1551 |
+
vision_outputs=vision_outputs,
|
| 1552 |
+
qformer_outputs=query_outputs,
|
| 1553 |
+
language_model_outputs=outputs,
|
| 1554 |
+
)
|
| 1555 |
+
|
| 1556 |
+
@torch.no_grad()
|
| 1557 |
+
def generate(
|
| 1558 |
+
self,
|
| 1559 |
+
pixel_values: torch.FloatTensor,
|
| 1560 |
+
qformer_input_ids: Optional[torch.LongTensor] = None,
|
| 1561 |
+
qformer_attention_mask: Optional[torch.LongTensor] = None,
|
| 1562 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1563 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 1564 |
+
interpolate_pos_encoding: bool = False,
|
| 1565 |
+
**generate_kwargs,
|
| 1566 |
+
) -> torch.LongTensor:
|
| 1567 |
+
r"""
|
| 1568 |
+
Overrides `generate` function to be able to use the model as a conditional generator.
|
| 1569 |
+
|
| 1570 |
+
Args:
|
| 1571 |
+
pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width) or
|
| 1572 |
+
(batch_size, num_frames, num_channels, height, width)): Input images or videos to be processed.
|
| 1573 |
+
qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
|
| 1574 |
+
The sequence used as a prompt to be fed to the Q-Former module.
|
| 1575 |
+
qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
|
| 1576 |
+
Mask to avoid performing attention on padding token indices.
|
| 1577 |
+
input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
|
| 1578 |
+
The sequence used as a prompt for the generation.
|
| 1579 |
+
attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
|
| 1580 |
+
Mask to avoid performing attention on padding token indices.
|
| 1581 |
+
interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
|
| 1582 |
+
Whether to interpolate the positional encoding of the image embeddings.
|
| 1583 |
+
|
| 1584 |
+
Returns:
|
| 1585 |
+
captions (list): A list of strings of length batch_size * num_captions.
|
| 1586 |
+
"""
|
| 1587 |
+
if hasattr(self, "hf_device_map"):
|
| 1588 |
+
# preprocess for `accelerate`
|
| 1589 |
+
self._preprocess_accelerate()
|
| 1590 |
+
|
| 1591 |
+
# we process in a batched way, later unbatch it back (video has frames=4)
|
| 1592 |
+
batch_size, frames, channel, height, width = pixel_values.shape
|
| 1593 |
+
pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)
|
| 1594 |
+
|
| 1595 |
+
image_embeds = self.vision_model(
|
| 1596 |
+
pixel_values,
|
| 1597 |
+
return_dict=True,
|
| 1598 |
+
interpolate_pos_encoding=interpolate_pos_encoding,
|
| 1599 |
+
).last_hidden_state
|
| 1600 |
+
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
|
| 1601 |
+
|
| 1602 |
+
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
|
| 1603 |
+
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
|
| 1604 |
+
if qformer_attention_mask is None:
|
| 1605 |
+
qformer_attention_mask = torch.ones_like(qformer_input_ids)
|
| 1606 |
+
|
| 1607 |
+
qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
|
| 1608 |
+
qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
|
| 1609 |
+
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
|
| 1610 |
+
query_outputs = self.qformer(
|
| 1611 |
+
input_ids=qformer_input_ids,
|
| 1612 |
+
attention_mask=qformer_attention_mask,
|
| 1613 |
+
query_embeds=query_tokens,
|
| 1614 |
+
encoder_hidden_states=image_embeds,
|
| 1615 |
+
encoder_attention_mask=image_attention_mask,
|
| 1616 |
+
return_dict=True,
|
| 1617 |
+
)
|
| 1618 |
+
query_output = query_outputs.last_hidden_state[:, : query_tokens.size(1), :]
|
| 1619 |
+
|
| 1620 |
+
language_model_inputs = self.language_projection(query_output)
|
| 1621 |
+
|
| 1622 |
+
# unbatch the embeddings back by moving frames to seq-len
|
| 1623 |
+
language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)
|
| 1624 |
+
language_attention_mask = torch.ones(
|
| 1625 |
+
language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
|
| 1626 |
+
)
|
| 1627 |
+
|
| 1628 |
+
if input_ids is None:
|
| 1629 |
+
start_tokens = [self.config.text_config.bos_token_id]
|
| 1630 |
+
if getattr(self.config, "video_token_index", None) is not None:
|
| 1631 |
+
start_tokens = [self.config.video_token_index] * self.config.num_query_tokens * 4 + start_tokens
|
| 1632 |
+
input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device)
|
| 1633 |
+
input_ids = input_ids.repeat(batch_size, 1)
|
| 1634 |
+
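# Illustrative note (assumed values): with num_query_tokens=32 the list above holds
# 32 * 4 = 128 video placeholder tokens followed by BOS, matching the 128 query
# embeddings produced for the 4 sampled frames.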
|
| 1635 |
+
if attention_mask is None:
|
| 1636 |
+
attention_mask = torch.ones_like(input_ids)
|
| 1637 |
+
|
| 1638 |
+
inputs_embeds = self.get_input_embeddings()(input_ids)
|
| 1639 |
+
|
| 1640 |
+
# if the model already has "video_token_index" then the input is expanded to account for image embeds
|
| 1641 |
+
# otherwise we expand manually by concatenating
|
| 1642 |
+
if getattr(self.config, "video_token_index", None) is not None:
|
| 1643 |
+
special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds)
|
| 1644 |
+
inputs_embeds[special_image_mask] = language_model_inputs.flatten()
|
| 1645 |
+
else:
|
| 1646 |
+
logger.warning_once(
|
| 1647 |
+
"Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. "
|
| 1648 |
+
"Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. "
|
| 1649 |
+
"Using processors without these attributes in the config is deprecated and will throw an error in v4.47."
|
| 1650 |
+
)
|
| 1651 |
+
inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
|
| 1652 |
+
attention_mask = torch.cat(
|
| 1653 |
+
[language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1
|
| 1654 |
+
)
|
| 1655 |
+
|
| 1656 |
+
# add image_embeds length to max_length, so that the final max_length is counted only on token embeds
|
| 1657 |
+
# -1 is to account for the prepended BOS after `generate`.
|
| 1658 |
+
if not self.language_model.config.is_encoder_decoder:
|
| 1659 |
+
generate_kwargs["max_length"] = (
|
| 1660 |
+
generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
|
| 1661 |
+
)
|
| 1662 |
+
generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]
|
| 1663 |
+
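# Illustrative arithmetic (assumed defaults): with max_length=20 and
# language_model_inputs.shape[1] = 32 * 4 = 128, the effective budget becomes
# 20 + 128 - 1 = 147 tokens, so the prepended video embeddings do not eat into
# the number of newly generated text tokens.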
|
| 1664 |
+
inputs = {"inputs_embeds": inputs_embeds, "attention_mask": attention_mask}
|
| 1665 |
+
if not self.language_model.config.is_encoder_decoder:
|
| 1666 |
+
inputs["input_ids"] = input_ids
|
| 1667 |
+
|
| 1668 |
+
outputs = self.language_model.generate(**inputs, **generate_kwargs)
|
| 1669 |
+
|
| 1670 |
+
return outputs
|
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/modular_instructblipvideo.py
ADDED
|
@@ -0,0 +1,483 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
from dataclasses import dataclass
|
| 17 |
+
from typing import Optional, Tuple, Union
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
import torch.utils.checkpoint
|
| 21 |
+
from torch.nn import CrossEntropyLoss
|
| 22 |
+
|
| 23 |
+
from transformers.models.instructblip.configuration_instructblip import (
|
| 24 |
+
InstructBlipQFormerConfig,
|
| 25 |
+
InstructBlipVisionConfig,
|
| 26 |
+
)
|
| 27 |
+
from transformers.models.instructblip.modeling_instructblip import (
|
| 28 |
+
InstructBlipForConditionalGeneration,
|
| 29 |
+
InstructBlipForConditionalGenerationModelOutput,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
from ...configuration_utils import PretrainedConfig
|
| 33 |
+
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
|
| 34 |
+
from ...utils import logging
|
| 35 |
+
from ..auto import CONFIG_MAPPING, AutoConfig
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
logger = logging.get_logger(__name__)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class InstructBlipVideoVisionConfig(InstructBlipVisionConfig):
|
| 42 |
+
pass
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class InstructBlipVideoQFormerConfig(InstructBlipQFormerConfig):
|
| 46 |
+
pass
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class InstructBlipVideoConfig(PretrainedConfig):
|
| 50 |
+
r"""
|
| 51 |
+
[`InstructBlipVideoConfig`] is the configuration class to store the configuration of a
|
| 52 |
+
[`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate an Instructblipvideo model according to the specified
|
| 53 |
+
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
|
| 54 |
+
the defaults will yield a similar configuration to that of the Instructblipvideo
|
| 55 |
+
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
|
| 56 |
+
|
| 57 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 58 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 59 |
+
|
| 60 |
+
Args:
|
| 61 |
+
vision_config (`dict`, *optional*):
|
| 62 |
+
Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`].
|
| 63 |
+
qformer_config (`dict`, *optional*):
|
| 64 |
+
Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`].
|
| 65 |
+
text_config (`dict`, *optional*):
|
| 66 |
+
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
|
| 67 |
+
num_query_tokens (`int`, *optional*, defaults to 32):
|
| 68 |
+
The number of query tokens passed through the Transformer.
|
| 69 |
+
|
| 70 |
+
video_token_index (`int`, *optional*):
|
| 71 |
+
Token index of special video token.
|
| 72 |
+
kwargs (*optional*):
|
| 73 |
+
Dictionary of keyword arguments.
|
| 74 |
+
|
| 75 |
+
Example:
|
| 76 |
+
|
| 77 |
+
```python
|
| 78 |
+
>>> from transformers import (
|
| 79 |
+
... InstructBlipVideoVisionConfig,
|
| 80 |
+
... InstructBlipVideoQFormerConfig,
|
| 81 |
+
... OPTConfig,
|
| 82 |
+
... InstructBlipVideoConfig,
|
| 83 |
+
... InstructBlipVideoForConditionalGeneration,
|
| 84 |
+
... )
|
| 85 |
+
|
| 86 |
+
>>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration
|
| 87 |
+
>>> configuration = InstructBlipVideoConfig()
|
| 88 |
+
|
| 89 |
+
>>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
|
| 90 |
+
>>> model = InstructBlipVideoForConditionalGeneration(configuration)
|
| 91 |
+
|
| 92 |
+
>>> # Accessing the model configuration
|
| 93 |
+
>>> configuration = model.config
|
| 94 |
+
|
| 95 |
+
>>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig
|
| 96 |
+
|
| 97 |
+
>>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations
|
| 98 |
+
>>> vision_config = InstructBlipVideoVisionConfig()
|
| 99 |
+
>>> qformer_config = InstructBlipVideoQFormerConfig()
|
| 100 |
+
>>> text_config = OPTConfig()
|
| 101 |
+
|
| 102 |
+
>>> config = InstructBlipVideoConfig.from_text_vision_configs(vision_config, qformer_config, text_config)
|
| 103 |
+
```"""
|
| 104 |
+
|
| 105 |
+
model_type = "instructblipvideo"
|
| 106 |
+
sub_configs = {
|
| 107 |
+
"text_config": AutoConfig,
|
| 108 |
+
"qformer_config": InstructBlipVideoQFormerConfig,
|
| 109 |
+
"vision_config": InstructBlipVideoVisionConfig,
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
def __init__(
|
| 113 |
+
self,
|
| 114 |
+
vision_config=None,
|
| 115 |
+
qformer_config=None,
|
| 116 |
+
text_config=None,
|
| 117 |
+
num_query_tokens=32,
|
| 118 |
+
video_token_index=None,
|
| 119 |
+
**kwargs,
|
| 120 |
+
):
|
| 121 |
+
super().__init__(**kwargs)
|
| 122 |
+
|
| 123 |
+
if vision_config is None:
|
| 124 |
+
vision_config = {}
|
| 125 |
+
logger.info("vision_config is None. initializing the InstructBlipVideoVisionConfig with default values.")
|
| 126 |
+
|
| 127 |
+
if qformer_config is None:
|
| 128 |
+
qformer_config = {}
|
| 129 |
+
logger.info("qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.")
|
| 130 |
+
|
| 131 |
+
if text_config is None:
|
| 132 |
+
text_config = {}
|
| 133 |
+
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
|
| 134 |
+
|
| 135 |
+
self.vision_config = InstructBlipVideoVisionConfig(**vision_config)
|
| 136 |
+
self.qformer_config = InstructBlipVideoQFormerConfig(**qformer_config)
|
| 137 |
+
text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
|
| 138 |
+
self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
|
| 139 |
+
|
| 140 |
+
self.tie_word_embeddings = self.text_config.tie_word_embeddings
|
| 141 |
+
self.is_encoder_decoder = self.text_config.is_encoder_decoder
|
| 142 |
+
|
| 143 |
+
self.num_query_tokens = num_query_tokens
|
| 144 |
+
self.video_token_index = video_token_index
|
| 145 |
+
self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
|
| 146 |
+
self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
|
| 147 |
+
self.initializer_factor = 1.0
|
| 148 |
+
self.initializer_range = 0.02
|
| 149 |
+
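# Illustrative note (not from the original source): a text_config with
# model_type="opt" appears in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, so the
# decoder-only language-model path is used; a "t5" text_config would instead
# route through the encoder-decoder branch of the conditional generation model.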
|
| 150 |
+
@classmethod
|
| 151 |
+
def from_vision_qformer_text_configs(
|
| 152 |
+
cls,
|
| 153 |
+
vision_config: InstructBlipVideoVisionConfig,
|
| 154 |
+
qformer_config: InstructBlipVideoQFormerConfig,
|
| 155 |
+
text_config: PretrainedConfig,
|
| 156 |
+
**kwargs,
|
| 157 |
+
):
|
| 158 |
+
r"""
|
| 159 |
+
Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from a InstructBlipVideo vision model, Q-Former and
|
| 160 |
+
language model configurations.
|
| 161 |
+
|
| 162 |
+
Returns:
|
| 163 |
+
[`InstructBlipVideoConfig`]: An instance of a configuration object
|
| 164 |
+
"""
|
| 165 |
+
|
| 166 |
+
return cls(
|
| 167 |
+
vision_config=vision_config.to_dict(),
|
| 168 |
+
qformer_config=qformer_config.to_dict(),
|
| 169 |
+
text_config=text_config.to_dict(),
|
| 170 |
+
**kwargs,
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
@dataclass
|
| 175 |
+
class InstructBlipVideoForConditionalGenerationModelOutput(InstructBlipForConditionalGenerationModelOutput):
|
| 176 |
+
pass
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class InstructBlipVideoForConditionalGeneration(InstructBlipForConditionalGeneration):
|
| 180 |
+
def forward(
|
| 181 |
+
self,
|
| 182 |
+
pixel_values: torch.FloatTensor,
|
| 183 |
+
qformer_input_ids: torch.FloatTensor,
|
| 184 |
+
qformer_attention_mask: Optional[torch.LongTensor] = None,
|
| 185 |
+
input_ids: Optional[torch.FloatTensor] = None,
|
| 186 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 187 |
+
decoder_input_ids: Optional[torch.LongTensor] = None,
|
| 188 |
+
decoder_attention_mask: Optional[torch.LongTensor] = None,
|
| 189 |
+
output_attentions: Optional[bool] = None,
|
| 190 |
+
output_hidden_states: Optional[bool] = None,
|
| 191 |
+
labels: Optional[torch.LongTensor] = None,
|
| 192 |
+
return_dict: Optional[bool] = None,
|
| 193 |
+
interpolate_pos_encoding: bool = False,
|
| 194 |
+
) -> Union[Tuple, InstructBlipVideoForConditionalGenerationModelOutput]:
|
| 195 |
+
r"""
|
| 196 |
+
```python
|
| 197 |
+
>>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration
|
| 198 |
+
>>> import torch
|
| 199 |
+
>>> from huggingface_hub import hf_hub_download
|
| 200 |
+
>>> import av
|
| 201 |
+
>>> import numpy as np
|
| 202 |
+
|
| 203 |
+
>>> def read_video_pyav(container, indices):
|
| 204 |
+
... '''
|
| 205 |
+
... Decode the video with PyAV decoder.
|
| 206 |
+
... Args:
|
| 207 |
+
... container (`av.container.input.InputContainer`): PyAV container.
|
| 208 |
+
... indices (`List[int]`): List of frame indices to decode.
|
| 209 |
+
... Returns:
|
| 210 |
+
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
|
| 211 |
+
... '''
|
| 212 |
+
... frames = []
|
| 213 |
+
... container.seek(0)
|
| 214 |
+
... start_index = indices[0]
|
| 215 |
+
... end_index = indices[-1]
|
| 216 |
+
... for i, frame in enumerate(container.decode(video=0)):
|
| 217 |
+
... if i > end_index:
|
| 218 |
+
... break
|
| 219 |
+
... if i >= start_index and i in indices:
|
| 220 |
+
... frames.append(frame)
|
| 221 |
+
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
|
| 222 |
+
|
| 223 |
+
>>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto")
|
| 224 |
+
>>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
|
| 225 |
+
|
| 226 |
+
>>> file_path = hf_hub_download(
|
| 227 |
+
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
|
| 228 |
+
... )
|
| 229 |
+
>>> container = av.open(file_path)
|
| 230 |
+
|
| 231 |
+
>>> # sample uniformly 4 frames from the video
|
| 232 |
+
>>> total_frames = container.streams.video[0].frames
|
| 233 |
+
>>> indices = np.arange(0, total_frames, total_frames / 4).astype(int)
|
| 234 |
+
>>> clip = read_video_pyav(container, indices)
|
| 235 |
+
|
| 236 |
+
>>> prompt = "What is happening in the video?"
|
| 237 |
+
>>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device)
|
| 238 |
+
|
| 239 |
+
>>> outputs = model.generate(
|
| 240 |
+
... **inputs,
|
| 241 |
+
... do_sample=False,
|
| 242 |
+
... num_beams=5,
|
| 243 |
+
... max_length=256,
|
| 244 |
+
... repetition_penalty=1.5,
|
| 245 |
+
... length_penalty=1.0,
|
| 246 |
+
... )
|
| 247 |
+
>>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
|
| 248 |
+
>>> print(generated_text)
|
| 249 |
+
"A person is eating a bowl of pasta, and they are using a fork to eat it. The person is sitting at a table, and the plate of pasta is on the table in front"
|
| 250 |
+
```"""
|
| 251 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 252 |
+
|
| 253 |
+
# step 1: forward the images through the vision encoder,
|
| 254 |
+
# we process in a batched way, later unbatch it back (video has frames=4 always)
|
| 255 |
+
batch_size, frames, channel, height, width = pixel_values.shape
|
| 256 |
+
pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)
|
| 257 |
+
|
| 258 |
+
vision_outputs = self.vision_model(
|
| 259 |
+
pixel_values=pixel_values,
|
| 260 |
+
output_attentions=output_attentions,
|
| 261 |
+
output_hidden_states=output_hidden_states,
|
| 262 |
+
return_dict=return_dict,
|
| 263 |
+
interpolate_pos_encoding=interpolate_pos_encoding,
|
| 264 |
+
)
|
| 265 |
+
image_embeds = vision_outputs[0]
|
| 266 |
+
|
| 267 |
+
# step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
|
| 268 |
+
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
|
| 269 |
+
|
| 270 |
+
# difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former
|
| 271 |
+
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
|
| 272 |
+
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
|
| 273 |
+
|
| 274 |
+
if qformer_attention_mask is None:
|
| 275 |
+
qformer_attention_mask = torch.ones_like(qformer_input_ids)
|
| 276 |
+
|
| 277 |
+
qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
|
| 278 |
+
qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
|
| 279 |
+
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
|
| 280 |
+
query_outputs = self.qformer(
|
| 281 |
+
input_ids=qformer_input_ids,
|
| 282 |
+
attention_mask=qformer_attention_mask,
|
| 283 |
+
query_embeds=query_tokens,
|
| 284 |
+
encoder_hidden_states=image_embeds,
|
| 285 |
+
encoder_attention_mask=image_attention_mask,
|
| 286 |
+
output_attentions=output_attentions,
|
| 287 |
+
output_hidden_states=output_hidden_states,
|
| 288 |
+
return_dict=return_dict,
|
| 289 |
+
)
|
| 290 |
+
query_output = query_outputs[0][:, : query_tokens.size(1), :]
|
| 291 |
+
|
| 292 |
+
# step 3: use the language model, conditioned on the query outputs and the prompt
|
| 293 |
+
language_model_inputs = self.language_projection(query_output)
|
| 294 |
+
|
| 295 |
+
# unbatch inputs back, each video-frame gets `num_query_tokens` seq length
|
| 296 |
+
language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)
|
| 297 |
+
language_model_attention_mask = torch.ones(
|
| 298 |
+
language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
|
| 302 |
+
if attention_mask is None:
|
| 303 |
+
attention_mask = torch.ones_like(input_ids)
|
| 304 |
+
|
| 305 |
+
# if the model already has "video_token_index" then the input is expanded to account for image embeds
|
| 306 |
+
# otherwise we expand manually by concatenating
|
| 307 |
+
if getattr(self.config, "video_token_index", None) is not None:
|
| 308 |
+
special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds)
|
| 309 |
+
inputs_embeds[special_image_mask] = language_model_inputs.flatten()
|
| 310 |
+
else:
|
| 311 |
+
logger.warning_once(
|
| 312 |
+
"Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. "
|
| 313 |
+
"Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. "
|
| 314 |
+
"Using processors without these attributes in the config is deprecated and will throw an error in v4.47."
|
| 315 |
+
)
|
| 316 |
+
inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
|
| 317 |
+
attention_mask = torch.cat(
|
| 318 |
+
[language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
+
if self.config.use_decoder_only_language_model:
|
| 322 |
+
outputs = self.language_model(
|
| 323 |
+
inputs_embeds=inputs_embeds,
|
| 324 |
+
attention_mask=attention_mask,
|
| 325 |
+
output_attentions=output_attentions,
|
| 326 |
+
output_hidden_states=output_hidden_states,
|
| 327 |
+
return_dict=return_dict,
|
| 328 |
+
)
|
| 329 |
+
logits = outputs.logits if return_dict else outputs[0]
|
| 330 |
+
loss = None
|
| 331 |
+
# we compute the loss here since we need to take into account the sequence length of the query embeds
|
| 332 |
+
if labels is not None:
|
| 333 |
+
labels = labels.to(logits.device)
|
| 334 |
+
logits = logits[:, -labels.size(1) :, :]
|
| 335 |
+
# Shift so that tokens < n predict n
|
| 336 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 337 |
+
shift_labels = labels[..., 1:].contiguous().to(logits.device)
|
| 338 |
+
|
| 339 |
+
# Flatten the tokens
|
| 340 |
+
loss_fct = CrossEntropyLoss(reduction="mean")
|
| 341 |
+
|
| 342 |
+
loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
|
| 343 |
+
else:
|
| 344 |
+
outputs = self.language_model(
|
| 345 |
+
inputs_embeds=inputs_embeds,
|
| 346 |
+
attention_mask=attention_mask,
|
| 347 |
+
decoder_input_ids=decoder_input_ids,
|
| 348 |
+
decoder_attention_mask=decoder_attention_mask,
|
| 349 |
+
output_attentions=output_attentions,
|
| 350 |
+
output_hidden_states=output_hidden_states,
|
| 351 |
+
return_dict=return_dict,
|
| 352 |
+
labels=labels,
|
| 353 |
+
)
|
| 354 |
+
loss = outputs.loss if return_dict else outputs[0]
|
| 355 |
+
logits = outputs.logits if return_dict else outputs[1]
|
| 356 |
+
|
| 357 |
+
if not return_dict:
|
| 358 |
+
output = (logits, vision_outputs, query_outputs, outputs)
|
| 359 |
+
return ((loss,) + output) if loss is not None else output
|
| 360 |
+
|
| 361 |
+
return InstructBlipVideoForConditionalGenerationModelOutput(
|
| 362 |
+
loss=loss,
|
| 363 |
+
logits=logits,
|
| 364 |
+
vision_outputs=vision_outputs,
|
| 365 |
+
qformer_outputs=query_outputs,
|
| 366 |
+
language_model_outputs=outputs,
|
| 367 |
+
)
+
+    @torch.no_grad()
+    def generate(
+        self,
+        pixel_values: torch.FloatTensor,
+        qformer_input_ids: Optional[torch.LongTensor] = None,
+        qformer_attention_mask: Optional[torch.LongTensor] = None,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.LongTensor] = None,
+        interpolate_pos_encoding: bool = False,
+        **generate_kwargs,
+    ) -> torch.LongTensor:
+        r"""
+        Overrides the `generate` function so the model can be used as a conditional generator.
+
+        Args:
+            pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width) or
+                (batch_size, num_frames, num_channels, height, width)): Input images or videos to be processed.
+            qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+                The sequence used as a prompt to be fed to the Q-Former module.
+            qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+                Mask to avoid performing attention on padding token indices.
+            input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+                The sequence used as a prompt for the generation.
+            attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+                Mask to avoid performing attention on padding token indices.
+            interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
+                Whether to interpolate the positional encoding of the image embeddings.
+
+        Returns:
+            captions (list): A list of strings of length batch_size * num_captions.
+        """
+        if hasattr(self, "hf_device_map"):
+            # preprocess for `accelerate`
+            self._preprocess_accelerate()
+
+        # we process frames in a batched way and unbatch them back later (a video has frames=4)
+        batch_size, frames, channel, height, width = pixel_values.shape
+        pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)
+
+        image_embeds = self.vision_model(
+            pixel_values,
+            return_dict=True,
+            interpolate_pos_encoding=interpolate_pos_encoding,
+        ).last_hidden_state
+        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
+
+        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+        query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
+        if qformer_attention_mask is None:
+            qformer_attention_mask = torch.ones_like(qformer_input_ids)
+
+        qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
+        qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
+        qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
+        query_outputs = self.qformer(
+            input_ids=qformer_input_ids,
+            attention_mask=qformer_attention_mask,
+            query_embeds=query_tokens,
+            encoder_hidden_states=image_embeds,
+            encoder_attention_mask=image_attention_mask,
+            return_dict=True,
+        )
+        query_output = query_outputs.last_hidden_state[:, : query_tokens.size(1), :]
+
+        language_model_inputs = self.language_projection(query_output)
+
+        # unbatch the embeddings back by moving frames to seq-len
+        language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)
+        language_attention_mask = torch.ones(
+            language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
+        )
+
+        if input_ids is None:
+            start_tokens = [self.config.text_config.bos_token_id]
+            if getattr(self.config, "video_token_index", None) is not None:
+                start_tokens = [self.config.video_token_index] * self.config.num_query_tokens * 4 + start_tokens
+            input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device)
+            input_ids = input_ids.repeat(batch_size, 1)
+
+        if attention_mask is None:
+            attention_mask = torch.ones_like(input_ids)
+
+        inputs_embeds = self.get_input_embeddings()(input_ids)
+
+        # if the model already has "video_token_index" then the input is expanded to account for image embeds
+        # otherwise we expand manually by concatenating
+        if getattr(self.config, "video_token_index", None) is not None:
+            special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds)
+            inputs_embeds[special_image_mask] = language_model_inputs.flatten()
+        else:
+            logger.warning_once(
+                "Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. "
+                "Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. "
+                "Using processors without these attributes in the config is deprecated and will throw an error in v4.47."
+            )
+            inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
+            attention_mask = torch.cat(
+                [language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1
+            )
+
+        # add image_embeds length to max_length, so that the final max_length is counted only on token embeds
+        # -1 is to account for the BOS token prepended by `generate`
+        if not self.language_model.config.is_encoder_decoder:
+            generate_kwargs["max_length"] = (
+                generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
+            )
+            generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]
+
+        inputs = {"inputs_embeds": inputs_embeds, "attention_mask": attention_mask}
+        if not self.language_model.config.is_encoder_decoder:
+            inputs["input_ids"] = input_ids
+
+        outputs = self.language_model.generate(**inputs, **generate_kwargs)
+
+        return outputs
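
The `generate` override above is the whole video-inference entry point: it encodes the 4-frame clip, runs the Q-Former once per frame, projects the query outputs into the language model's embedding space, and only then calls the language model's own `generate`. A minimal smoke-test sketch follows; the checkpoint id is a placeholder, the inputs are dummy tensors, and the `tokenizer`/`qformer_tokenizer` attributes assume the usual InstructBLIP-style processor layout. Leaving `input_ids=None` lets the method build its own BOS (plus video-token) prompt, which works for both old- and new-style configs.

# Smoke-test sketch (not part of the diff). Placeholder checkpoint id; the
# processor is assumed to expose a `qformer_tokenizer`, as InstructBLIP-style
# processors do.
import torch

from transformers import InstructBlipVideoForConditionalGeneration, InstructBlipVideoProcessor

model_id = "Salesforce/instructblip-vicuna-7b"  # placeholder checkpoint id
processor = InstructBlipVideoProcessor.from_pretrained(model_id)
model = InstructBlipVideoForConditionalGeneration.from_pretrained(model_id)

# The Q-Former prompt is tokenized separately from the LM prompt.
prompt = "What is happening in this video?"
qformer = processor.qformer_tokenizer(prompt, return_tensors="pt")

# InstructBlipVideo processes exactly 4 frames per clip: (batch, frames, C, H, W).
pixel_values = torch.randn(1, 4, 3, 224, 224)

# input_ids is omitted on purpose: the override builds the start tokens itself.
generated_ids = model.generate(
    pixel_values=pixel_values,
    qformer_input_ids=qformer.input_ids,
    max_new_tokens=20,
)
print(processor.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])
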
janus/lib/python3.10/site-packages/transformers/models/pixtral/__init__.py
ADDED
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+    from .configuration_pixtral import *
+    from .image_processing_pixtral import *
+    from .image_processing_pixtral_fast import *
+    from .modeling_pixtral import *
+    from .processing_pixtral import *
+else:
+    import sys
+
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
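
The `else` branch is what makes `import transformers.models.pixtral` cheap: `define_import_structure` scans the package for its public symbols, and the `_LazyModule` placed into `sys.modules` defers each submodule import until an attribute is first accessed. A small sketch of the observable behavior, assuming this `transformers` build and its vision extras (torch, torchvision) are installed:

# Sketch of the lazy-import behavior set up above: importing the package does
# not pull in the heavy submodules; the first attribute access does.
import importlib
import sys

pixtral = importlib.import_module("transformers.models.pixtral")
# The registered module object is the _LazyModule placeholder, not a plain module.
print(type(sys.modules["transformers.models.pixtral"]).__name__)  # _LazyModule

# Attribute access triggers the real import of image_processing_pixtral_fast.
cls = pixtral.PixtralImageProcessorFast
print(cls.__module__)  # transformers.models.pixtral.image_processing_pixtral_fast
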
janus/lib/python3.10/site-packages/transformers/models/pixtral/__pycache__/image_processing_pixtral_fast.cpython-310.pyc
ADDED
Binary file (13.6 kB)

janus/lib/python3.10/site-packages/transformers/models/pixtral/image_processing_pixtral_fast.py
ADDED
@@ -0,0 +1,355 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Pixtral."""
+
+from typing import Dict, List, Optional, Union
+
+from ...image_processing_utils import get_size_dict
+from ...image_processing_utils_fast import BaseImageProcessorFast
+from ...image_utils import (
+    ChannelDimension,
+    ImageInput,
+    ImageType,
+    PILImageResampling,
+    get_image_size,
+    get_image_type,
+    infer_channel_dimension_format,
+    validate_fast_preprocess_arguments,
+    validate_kwargs,
+)
+from ...utils import (
+    TensorType,
+    is_torch_available,
+    is_torchvision_available,
+    is_torchvision_v2_available,
+    is_vision_available,
+    logging,
+)
+from .image_processing_pixtral import (
+    BatchMixFeature,
+    convert_to_rgb,
+    get_resize_output_image_size,
+    make_list_of_images,
+)
+
+
+logger = logging.get_logger(__name__)
+
+if is_torch_available():
+    import torch
+
+if is_torchvision_available():
+    if is_vision_available():
+        from ...image_utils import pil_torch_interpolation_mapping
+
+    if is_torchvision_v2_available():
+        from torchvision.transforms.v2 import functional as F
+    else:
+        from torchvision.transforms import functional as F
+
+
+class PixtralImageProcessorFast(BaseImageProcessorFast):
+    r"""
+    Constructs a fast Pixtral image processor that leverages torchvision.
+
+    Args:
+        do_resize (`bool`, *optional*, defaults to `True`):
+            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+            `do_resize` in the `preprocess` method.
+        size (`Dict[str, int]`, *optional*, defaults to `{"longest_edge": 1024}`):
+            Size of the maximum dimension of either the height or width dimension of the image. Used to control how
+            images are resized. If either the height or width is greater than `size["longest_edge"]`, then both the
+            height and width are rescaled by `height / ratio` and `width / ratio`, where
+            `ratio = max(height / longest_edge, width / longest_edge)`.
+        patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
+            Size of the patches in the model, used to calculate the output image size. Can be overridden by
+            `patch_size` in the `preprocess` method.
+        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+        do_rescale (`bool`, *optional*, defaults to `True`):
+            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+            the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+            method.
+        do_normalize (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+        do_convert_rgb (`bool`, *optional*, defaults to `True`):
+            Whether to convert the image to RGB.
+    """
+
+    model_input_names = ["pixel_values"]
+
+    def __init__(
+        self,
+        do_resize: bool = True,
+        size: Dict[str, int] = None,
+        patch_size: Dict[str, int] = None,
+        resample: Union[PILImageResampling, "F.InterpolationMode"] = PILImageResampling.BICUBIC,
+        do_rescale: bool = True,
+        rescale_factor: Union[int, float] = 1 / 255,
+        do_normalize: bool = True,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_convert_rgb: bool = True,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+        size = size if size is not None else {"longest_edge": 1024}
+        patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
+        patch_size = get_size_dict(patch_size, default_to_square=True)
+
+        self.do_resize = do_resize
+        self.size = size
+        self.patch_size = patch_size
+        self.resample = resample
+        self.do_rescale = do_rescale
+        self.rescale_factor = rescale_factor
+        self.do_normalize = do_normalize
+        self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
+        self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
+        self.do_convert_rgb = do_convert_rgb
+        self._valid_processor_keys = [
+            "images",
+            "do_resize",
+            "size",
+            "patch_size",
+            "resample",
+            "do_rescale",
+            "rescale_factor",
+            "do_normalize",
+            "image_mean",
+            "image_std",
+            "do_convert_rgb",
+            "return_tensors",
+            "data_format",
+            "input_data_format",
+        ]
+
+    def resize(
+        self,
+        image: torch.Tensor,
+        size: Dict[str, int],
+        patch_size: Dict[str, int],
+        interpolation: "F.InterpolationMode" = None,
+        **kwargs,
+    ) -> torch.Tensor:
+        """
+        Resize an image. The output size is computed from `size` and `patch_size` so that the longest edge does not
+        exceed `size["longest_edge"]` while keeping the input aspect ratio.
+
+        Args:
+            image (`torch.Tensor`):
+                Image to resize.
+            size (`Dict[str, int]`):
+                Dict containing the longest possible edge of the image.
+            patch_size (`Dict[str, int]`):
+                Patch size used to calculate the size of the output image.
+            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
+                Resampling filter to use when resizing the image.
+        """
+        interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
+        if "longest_edge" in size:
+            size = (size["longest_edge"], size["longest_edge"])
+        elif "height" in size and "width" in size:
+            size = (size["height"], size["width"])
+        else:
+            raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.")
+
+        if "height" in patch_size and "width" in patch_size:
+            patch_size = (patch_size["height"], patch_size["width"])
+        else:
+            raise ValueError("patch_size must contain 'height' and 'width'.")
+
+        output_size = get_resize_output_image_size(
+            image,
+            size=size,
+            patch_size=patch_size,
+        )
+        return F.resize(
+            image,
+            size=output_size,
+            interpolation=interpolation,
+            **kwargs,
+        )
+
+    def preprocess(
+        self,
+        images: ImageInput,
+        do_resize: bool = None,
+        size: Dict[str, int] = None,
+        patch_size: Dict[str, int] = None,
+        resample: Optional[Union[PILImageResampling, "F.InterpolationMode"]] = None,
+        do_rescale: bool = None,
+        rescale_factor: float = None,
+        do_normalize: bool = None,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_convert_rgb: bool = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> BatchMixFeature:
+        """
+        Preprocess an image or batch of images.
+
+        Args:
+            images (`ImageInput`):
+                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                Whether to resize the image.
+            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                Describes the maximum input dimensions to the model.
+            patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
+                Patch size in the model. Used to calculate the output image size after resizing.
+            resample (`PILImageResampling` or `InterpolationMode`, *optional*, defaults to `self.resample`):
+                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+                has an effect if `do_resize` is set to `True`.
+            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image.
+            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                Whether to normalize the image.
+            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
+            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
+                `True`.
+            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+                Whether to convert the image to RGB.
+            return_tensors (`str` or `TensorType`, *optional*):
+                The type of tensors to return. Can be one of:
+                - Unset: Return a list of `np.ndarray`.
+                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                from the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+        """
+        patch_size = patch_size if patch_size is not None else self.patch_size
+        patch_size = get_size_dict(patch_size, default_to_square=True)
+
+        do_resize = do_resize if do_resize is not None else self.do_resize
+        size = size if size is not None else self.size
+        resample = resample if resample is not None else self.resample
+        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+        image_mean = image_mean if image_mean is not None else self.image_mean
+        image_std = image_std if image_std is not None else self.image_std
+        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+        device = kwargs.pop("device", None)
+
+        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+        images_list = make_list_of_images(images)
+        image_type = get_image_type(images_list[0][0])
+
+        if image_type not in [ImageType.PIL, ImageType.TORCH, ImageType.NUMPY]:
+            raise ValueError(f"Unsupported input image type {image_type}")
+
+        validate_fast_preprocess_arguments(
+            do_rescale=do_rescale,
+            rescale_factor=rescale_factor,
+            do_normalize=do_normalize,
+            image_mean=image_mean,
+            image_std=image_std,
+            do_resize=do_resize,
+            size=size,
+            resample=resample,
+            return_tensors=return_tensors,
+            data_format=data_format,
+        )
+
+        if do_convert_rgb:
+            images_list = [[convert_to_rgb(image) for image in images] for images in images_list]
+
+        if image_type == ImageType.PIL:
+            images_list = [[F.pil_to_tensor(image) for image in images] for images in images_list]
+        elif image_type == ImageType.NUMPY:
+            # not using F.to_tensor as it doesn't handle (C, H, W) numpy arrays
+            images_list = [[torch.from_numpy(image).contiguous() for image in images] for images in images_list]
+
+        if device is not None:
+            images_list = [[image.to(device) for image in images] for images in images_list]
+
+        # We assume that all images have the same channel dimension format.
+        if input_data_format is None:
+            input_data_format = infer_channel_dimension_format(images_list[0][0])
+        if input_data_format == ChannelDimension.LAST:
+            images_list = [[image.permute(2, 0, 1).contiguous() for image in images] for images in images_list]
+            input_data_format = ChannelDimension.FIRST
+
+        if do_rescale and do_normalize:
+            # fused rescale and normalize
+            new_mean = torch.tensor(image_mean, device=images_list[0][0].device) * (1.0 / rescale_factor)
+            new_std = torch.tensor(image_std, device=images_list[0][0].device) * (1.0 / rescale_factor)
+
+        batch_images = []
+        batch_image_sizes = []
+        for sample_images in images_list:
+            images = []
+            image_sizes = []
+            for image in sample_images:
+                if do_resize:
+                    interpolation = (
+                        pil_torch_interpolation_mapping[resample]
+                        if isinstance(resample, (PILImageResampling, int))
+                        else resample
+                    )
+                    image = self.resize(
+                        image=image,
+                        size=size,
+                        patch_size=patch_size,
+                        interpolation=interpolation,
+                    )
+
+                if do_rescale and do_normalize:
+                    # fused rescale and normalize
+                    image = F.normalize(image.to(dtype=torch.float32), new_mean, new_std)
+                elif do_rescale:
+                    image = image * rescale_factor
+                elif do_normalize:
+                    image = F.normalize(image, image_mean, image_std)
+
+                images.append(image)
+                image_sizes.append(get_image_size(image, input_data_format))
+            batch_images.append(images)
+            batch_image_sizes.append(image_sizes)
+
+        return BatchMixFeature(
+            data={"pixel_values": batch_images, "image_sizes": batch_image_sizes},
+            tensor_type=None,
+        )
+
+
+__all__ = ["PixtralImageProcessorFast"]
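
A short usage sketch for the processor above, assuming torch, torchvision, and Pillow are installed. It also spells out why the fused path pre-scales the statistics: with `s = rescale_factor`, `(x * s - mean) / std` equals `(x - mean / s) / (std / s)`, so multiplying both `image_mean` and `image_std` by `1 / s` lets a single `F.normalize` call perform the rescale and the normalization together.

# Usage sketch (not part of the diff): run the fast processor on one RGB image.
import numpy as np
from PIL import Image

from transformers.models.pixtral.image_processing_pixtral_fast import PixtralImageProcessorFast

processor = PixtralImageProcessorFast()
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

batch = processor.preprocess(images=image)
# pixel_values stays a nested list (one list of image tensors per sample) because
# Pixtral keeps variable-sized images instead of padding them to a single shape.
pixel_values = batch["pixel_values"][0][0]
print(pixel_values.shape)       # e.g. torch.Size([3, 480, 640]); both dims already multiples of 16
print(batch["image_sizes"][0])  # the matching (height, width) per image
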