Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/__init__.py +257 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__init__.py +112 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py +235 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py +374 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py +33 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py +507 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py +1427 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py +948 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__init__.py +130 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py +166 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py +57 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/modeling_convbert.py +1341 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/modeling_tf_convbert.py +1472 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py +529 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert_fast.py +198 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/dialogpt/__init__.py +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py +46 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__init__.py +168 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/configuration_electra.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/convert_electra_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_electra.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_flax_electra.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_tf_electra.cpython-310.pyc +0 -0
deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.66 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc
ADDED
Binary file (15.9 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc
ADDED
Binary file (28.7 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc
ADDED
Binary file (12.2 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc
ADDED
Binary file (41.8 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc
ADDED
Binary file (18 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc
ADDED
Binary file (27.5 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc
ADDED
Binary file (7.3 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc
ADDED
Binary file (7.78 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc
ADDED
Binary file (26.7 kB).
deepseekvl2/lib/python3.10/site-packages/transformers/models/__init__.py
ADDED
@@ -0,0 +1,257 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import (
+    albert,
+    align,
+    altclip,
+    audio_spectrogram_transformer,
+    auto,
+    autoformer,
+    bark,
+    bart,
+    barthez,
+    bartpho,
+    beit,
+    bert,
+    bert_generation,
+    bert_japanese,
+    bertweet,
+    big_bird,
+    bigbird_pegasus,
+    biogpt,
+    bit,
+    blenderbot,
+    blenderbot_small,
+    blip,
+    blip_2,
+    bloom,
+    bridgetower,
+    bros,
+    byt5,
+    camembert,
+    canine,
+    chinese_clip,
+    clap,
+    clip,
+    clipseg,
+    clvp,
+    code_llama,
+    codegen,
+    conditional_detr,
+    convbert,
+    convnext,
+    convnextv2,
+    cpm,
+    cpmant,
+    ctrl,
+    cvt,
+    data2vec,
+    deberta,
+    deberta_v2,
+    decision_transformer,
+    deformable_detr,
+    deit,
+    deprecated,
+    depth_anything,
+    deta,
+    detr,
+    dialogpt,
+    dinat,
+    dinov2,
+    distilbert,
+    dit,
+    donut,
+    dpr,
+    dpt,
+    efficientformer,
+    efficientnet,
+    electra,
+    encodec,
+    encoder_decoder,
+    ernie,
+    ernie_m,
+    esm,
+    falcon,
+    fastspeech2_conformer,
+    flaubert,
+    flava,
+    fnet,
+    focalnet,
+    fsmt,
+    funnel,
+    fuyu,
+    gemma,
+    git,
+    glpn,
+    gpt2,
+    gpt_bigcode,
+    gpt_neo,
+    gpt_neox,
+    gpt_neox_japanese,
+    gpt_sw3,
+    gptj,
+    gptsan_japanese,
+    graphormer,
+    groupvit,
+    herbert,
+    hubert,
+    ibert,
+    idefics,
+    imagegpt,
+    informer,
+    instructblip,
+    jukebox,
+    kosmos2,
+    layoutlm,
+    layoutlmv2,
+    layoutlmv3,
+    layoutxlm,
+    led,
+    levit,
+    lilt,
+    llama,
+    llava,
+    longformer,
+    longt5,
+    luke,
+    lxmert,
+    m2m_100,
+    marian,
+    markuplm,
+    mask2former,
+    maskformer,
+    mbart,
+    mbart50,
+    mega,
+    megatron_bert,
+    megatron_gpt2,
+    mgp_str,
+    mistral,
+    mixtral,
+    mluke,
+    mobilebert,
+    mobilenet_v1,
+    mobilenet_v2,
+    mobilevit,
+    mobilevitv2,
+    mpnet,
+    mpt,
+    mra,
+    mt5,
+    musicgen,
+    mvp,
+    nat,
+    nezha,
+    nllb,
+    nllb_moe,
+    nougat,
+    nystromformer,
+    oneformer,
+    openai,
+    opt,
+    owlv2,
+    owlvit,
+    patchtsmixer,
+    patchtst,
+    pegasus,
+    pegasus_x,
+    perceiver,
+    persimmon,
+    phi,
+    phobert,
+    pix2struct,
+    plbart,
+    poolformer,
+    pop2piano,
+    prophetnet,
+    pvt,
+    qdqbert,
+    qwen2,
+    rag,
+    realm,
+    reformer,
+    regnet,
+    rembert,
+    resnet,
+    roberta,
+    roberta_prelayernorm,
+    roc_bert,
+    roformer,
+    rwkv,
+    sam,
+    seamless_m4t,
+    seamless_m4t_v2,
+    segformer,
+    sew,
+    sew_d,
+    siglip,
+    speech_encoder_decoder,
+    speech_to_text,
+    speech_to_text_2,
+    speecht5,
+    splinter,
+    squeezebert,
+    stablelm,
+    swiftformer,
+    swin,
+    swin2sr,
+    swinv2,
+    switch_transformers,
+    t5,
+    table_transformer,
+    tapas,
+    time_series_transformer,
+    timesformer,
+    timm_backbone,
+    trocr,
+    tvlt,
+    tvp,
+    umt5,
+    unispeech,
+    unispeech_sat,
+    univnet,
+    upernet,
+    videomae,
+    vilt,
+    vipllava,
+    vision_encoder_decoder,
+    vision_text_dual_encoder,
+    visual_bert,
+    vit,
+    vit_hybrid,
+    vit_mae,
+    vit_msn,
+    vitdet,
+    vitmatte,
+    vits,
+    vivit,
+    wav2vec2,
+    wav2vec2_bert,
+    wav2vec2_conformer,
+    wav2vec2_phoneme,
+    wav2vec2_with_lm,
+    wavlm,
+    whisper,
+    x_clip,
+    xglm,
+    xlm,
+    xlm_prophetnet,
+    xlm_roberta,
+    xlm_roberta_xl,
+    xlnet,
+    xmod,
+    yolos,
+    yoso,
+)
deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__init__.py
ADDED
@@ -0,0 +1,112 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_flax_available,
+    is_torch_available,
+    is_vision_available,
+)
+
+
+_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
+
+try:
+    if not is_vision_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
+    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_beit"] = [
+        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "BeitForImageClassification",
+        "BeitForMaskedImageModeling",
+        "BeitForSemanticSegmentation",
+        "BeitModel",
+        "BeitPreTrainedModel",
+        "BeitBackbone",
+    ]
+
+
+try:
+    if not is_flax_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_flax_beit"] = [
+        "FlaxBeitForImageClassification",
+        "FlaxBeitForMaskedImageModeling",
+        "FlaxBeitModel",
+        "FlaxBeitPreTrainedModel",
+    ]
+
+if TYPE_CHECKING:
+    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
+
+    try:
+        if not is_vision_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .feature_extraction_beit import BeitFeatureExtractor
+        from .image_processing_beit import BeitImageProcessor
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_beit import (
+            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+            BeitBackbone,
+            BeitForImageClassification,
+            BeitForMaskedImageModeling,
+            BeitForSemanticSegmentation,
+            BeitModel,
+            BeitPreTrainedModel,
+        )
+
+    try:
+        if not is_flax_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_flax_beit import (
+            FlaxBeitForImageClassification,
+            FlaxBeitForMaskedImageModeling,
+            FlaxBeitModel,
+            FlaxBeitPreTrainedModel,
+        )
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.67 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc
ADDED
Binary file (10.2 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc
ADDED
Binary file (10.9 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc
ADDED
Binary file (986 Bytes).

deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc
ADDED
Binary file (18.2 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc
ADDED
Binary file (44.7 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc
ADDED
Binary file (28.3 kB).
deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py
ADDED
@@ -0,0 +1,235 @@
+# coding=utf-8
+# Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BEiT model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "microsoft/beit-base-patch16-224-pt22k": (
+        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
+    ),
+    # See all BEiT models at https://huggingface.co/models?filter=beit
+}
+
+
+class BeitConfig(BackboneConfigMixin, PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate an BEiT
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the BEiT
+    [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 8192):
+            Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during
+            pre-training.
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` are supported.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        image_size (`int`, *optional*, defaults to 224):
+            The size (resolution) of each image.
+        patch_size (`int`, *optional*, defaults to 16):
+            The size (resolution) of each patch.
+        num_channels (`int`, *optional*, defaults to 3):
+            The number of input channels.
+        use_mask_token (`bool`, *optional*, defaults to `False`):
+            Whether to use a mask token for masked image modeling.
+        use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to use BERT-style absolute position embeddings.
+        use_relative_position_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use T5-style relative position embeddings in the self-attention layers.
+        use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
+        layer_scale_init_value (`float`, *optional*, defaults to 0.1):
+            Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.
+        drop_path_rate (`float`, *optional*, defaults to 0.1):
+            Stochastic depth rate per sample (when applied in the main path of residual layers).
+        use_mean_pooling (`bool`, *optional*, defaults to `True`):
+            Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
+            CLS token, before applying the classification head.
+        pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
+            Pooling scales used in Pooling Pyramid Module applied on the last feature map.
+        use_auxiliary_head (`bool`, *optional*, defaults to `True`):
+            Whether to use an auxiliary head during training.
+        auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
+            Weight of the cross-entropy loss of the auxiliary head.
+        auxiliary_channels (`int`, *optional*, defaults to 256):
+            Number of channels to use in the auxiliary head.
+        auxiliary_num_convs (`int`, *optional*, defaults to 1):
+            Number of convolutional layers to use in the auxiliary head.
+        auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
+            Whether to concatenate the output of the auxiliary head with the input before the classification layer.
+        semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
+            The index that is ignored by the loss function of the semantic segmentation model.
+        out_features (`List[str]`, *optional*):
+            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+            same order as defined in the `stage_names` attribute.
+        out_indices (`List[int]`, *optional*):
+            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+            If unset and `out_features` is unset, will default to the last stage. Must be in the
+            same order as defined in the `stage_names` attribute.
+        add_fpn (`bool`, *optional*, defaults to `False`):
+            Whether to add a FPN as part of the backbone. Only relevant for [`BeitBackbone`].
+        reshape_hidden_states (`bool`, *optional*, defaults to `True`):
+            Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
+            case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
+            seq_len, hidden_size)`. Only relevant for [`BeitBackbone`].
+
+    Example:
+
+    ```python
+    >>> from transformers import BeitConfig, BeitModel
+
+    >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration
+    >>> configuration = BeitConfig()
+
+    >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration
+    >>> model = BeitModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "beit"
+
+    def __init__(
+        self,
+        vocab_size=8192,
+        hidden_size=768,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        intermediate_size=3072,
+        hidden_act="gelu",
+        hidden_dropout_prob=0.0,
+        attention_probs_dropout_prob=0.0,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        image_size=224,
+        patch_size=16,
+        num_channels=3,
+        use_mask_token=False,
+        use_absolute_position_embeddings=False,
+        use_relative_position_bias=False,
+        use_shared_relative_position_bias=False,
+        layer_scale_init_value=0.1,
+        drop_path_rate=0.1,
+        use_mean_pooling=True,
+        pool_scales=[1, 2, 3, 6],
+        use_auxiliary_head=True,
+        auxiliary_loss_weight=0.4,
+        auxiliary_channels=256,
+        auxiliary_num_convs=1,
+        auxiliary_concat_input=False,
+        semantic_loss_ignore_index=255,
+        out_features=None,
+        out_indices=None,
+        add_fpn=False,
+        reshape_hidden_states=True,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.intermediate_size = intermediate_size
+        self.hidden_act = hidden_act
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+
+        self.image_size = image_size
+        self.patch_size = patch_size
+        self.num_channels = num_channels
+        self.use_mask_token = use_mask_token
+        self.use_absolute_position_embeddings = use_absolute_position_embeddings
+        self.use_relative_position_bias = use_relative_position_bias
+        self.use_shared_relative_position_bias = use_shared_relative_position_bias
+        self.layer_scale_init_value = layer_scale_init_value
+        self.drop_path_rate = drop_path_rate
+        self.use_mean_pooling = use_mean_pooling
+        # decode head attributes (semantic segmentation)
+        self.pool_scales = pool_scales
+        # auxiliary head attributes (semantic segmentation)
+        self.use_auxiliary_head = use_auxiliary_head
+        self.auxiliary_loss_weight = auxiliary_loss_weight
+        self.auxiliary_channels = auxiliary_channels
+        self.auxiliary_num_convs = auxiliary_num_convs
+        self.auxiliary_concat_input = auxiliary_concat_input
+        self.semantic_loss_ignore_index = semantic_loss_ignore_index
+
+        # handle backwards compatibility
+        if "segmentation_indices" in kwargs:
+            logger.warning(
+                "The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.",
+                FutureWarning,
+            )
+            out_indices = kwargs.pop("segmentation_indices")
+
+        # backbone attributes
+        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)]
+        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+        )
+        self.add_fpn = add_fpn
+        self.reshape_hidden_states = reshape_hidden_states
+
+
+# Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
+class BeitOnnxConfig(OnnxConfig):
+    torch_onnx_minimum_version = version.parse("1.11")
+
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        return OrderedDict(
+            [
+                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+            ]
+        )
+
+    @property
+    def atol_for_validation(self) -> float:
+        return 1e-4
deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py
ADDED
@@ -0,0 +1,374 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert BEiT checkpoints from the unilm repository."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from datasets import load_dataset
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import (
+    BeitConfig,
+    BeitForImageClassification,
+    BeitForMaskedImageModeling,
+    BeitForSemanticSegmentation,
+    BeitImageProcessor,
+)
+from transformers.image_utils import PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, has_lm_head=False, is_semantic=False):
+    prefix = "backbone." if is_semantic else ""
+
+    rename_keys = []
+    for i in range(config.num_hidden_layers):
+        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
+        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
+        rename_keys.append(
+            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
+        )
+        rename_keys.append(
+            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
+        )
+        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
+        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
+        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
+        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
+        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
+        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
+
+    # projection layer + position embeddings
+    rename_keys.extend(
+        [
+            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
+            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
+            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
+        ]
+    )
+
+    if has_lm_head:
+        # mask token + shared relative position bias + layernorm
+        rename_keys.extend(
+            [
+                ("mask_token", "beit.embeddings.mask_token"),
+                (
+                    "rel_pos_bias.relative_position_bias_table",
+                    "beit.encoder.relative_position_bias.relative_position_bias_table",
+                ),
+                (
+                    "rel_pos_bias.relative_position_index",
+                    "beit.encoder.relative_position_bias.relative_position_index",
+                ),
+                ("norm.weight", "layernorm.weight"),
+                ("norm.bias", "layernorm.bias"),
+            ]
+        )
+    elif is_semantic:
+        # semantic segmentation classification heads
+        rename_keys.extend(
+            [
+                ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
+                ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
+                ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
+                ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
+            ]
+        )
+    else:
+        # layernorm + classification head
+        rename_keys.extend(
+            [
+                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
+                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
+                ("head.weight", "classifier.weight"),
+                ("head.bias", "classifier.bias"),
+            ]
+        )
+
+    return rename_keys
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
+    for i in range(config.num_hidden_layers):
+        prefix = "backbone." if is_semantic else ""
+        # queries, keys and values
+        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
+        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
+        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
+
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+            : config.hidden_size, :
+        ]
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+            config.hidden_size : config.hidden_size * 2, :
+        ]
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+            -config.hidden_size :, :
+        ]
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
+
+        # gamma_1 and gamma_2
+        # we call them lambda because otherwise they are renamed when using .from_pretrained
+        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
+        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
+
+        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
+        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
+
+        # relative_position bias table + index
+        if not has_lm_head:
+            # each layer has its own relative position bias
+            table = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_bias_table")
+            index = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_index")
+
+            state_dict[
+                f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table"
+            ] = table
+            state_dict[
+                f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index"
+            ] = index
+
+
+def rename_key(dct, old, new):
+    val = dct.pop(old)
+    dct[new] = val
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    im = Image.open(requests.get(url, stream=True).raw)
+    return im
+
+
+@torch.no_grad()
+def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path):
+    """
+    Copy/paste/tweak model's weights to our BEiT structure.
+    """
+
+    # define default BEiT configuration
+    config = BeitConfig()
+    has_lm_head = False
+    is_semantic = False
+    repo_id = "huggingface/label-files"
+    # set config parameters based on URL
+    if checkpoint_url[-9:-4] == "pt22k":
+        # masked image modeling
+        config.use_shared_relative_position_bias = True
+        config.use_mask_token = True
+        has_lm_head = True
+    elif checkpoint_url[-9:-4] == "ft22k":
+        # intermediate fine-tuning on ImageNet-22k
+        config.use_relative_position_bias = True
+        config.num_labels = 21841
+        filename = "imagenet-22k-id2label.json"
+        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+        id2label = {int(k): v for k, v in id2label.items()}
+        # this dataset contains 21843 labels but the model only has 21841
+        # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
+        del id2label[9205]
+        del id2label[15027]
+        config.id2label = id2label
+        config.label2id = {v: k for k, v in id2label.items()}
+    elif checkpoint_url[-8:-4] == "to1k":
+        # fine-tuning on ImageNet-1k
+        config.use_relative_position_bias = True
+        config.num_labels = 1000
+        filename = "imagenet-1k-id2label.json"
+        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+        id2label = {int(k): v for k, v in id2label.items()}
+        config.id2label = id2label
+        config.label2id = {v: k for k, v in id2label.items()}
+        if "384" in checkpoint_url:
+            config.image_size = 384
+        if "512" in checkpoint_url:
+            config.image_size = 512
+    elif "ade20k" in checkpoint_url:
+        # fine-tuning
+        config.use_relative_position_bias = True
+        config.num_labels = 150
+        filename = "ade20k-id2label.json"
+        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+        id2label = {int(k): v for k, v in id2label.items()}
+        config.id2label = id2label
+        config.label2id = {v: k for k, v in id2label.items()}
+        config.image_size = 640
+        is_semantic = True
+    else:
+        raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k', 'to1k' or 'ade20k'")
+
+    # size of the architecture
+    if "base" in checkpoint_url:
+        pass
+    elif "large" in checkpoint_url:
+        config.hidden_size = 1024
+        config.intermediate_size = 4096
+        config.num_hidden_layers = 24
+        config.num_attention_heads = 16
+        if "ade20k" in checkpoint_url:
+            config.image_size = 640
+            config.out_indices = [7, 11, 15, 23]
+    else:
+        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
+
+    # load state_dict of original model, remove and rename some keys
+    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)
+    state_dict = state_dict["model"] if "ade20k" not in checkpoint_url else state_dict["state_dict"]
+
+    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head, is_semantic=is_semantic)
+    for src, dest in rename_keys:
+        rename_key(state_dict, src, dest)
+    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head, is_semantic=is_semantic)
+    if is_semantic:
+        # add prefix to decoder keys
+        for key, val in state_dict.copy().items():
+            val = state_dict.pop(key)
+            if key.startswith("backbone.fpn"):
+                key = key.replace("backbone.fpn", "fpn")
+            state_dict[key] = val
+
+    # load HuggingFace model
+    if checkpoint_url[-9:-4] == "pt22k":
+        model = BeitForMaskedImageModeling(config)
+    elif "ade20k" in checkpoint_url:
+        model = BeitForSemanticSegmentation(config)
+    else:
+        model = BeitForImageClassification(config)
+    model.eval()
+    model.load_state_dict(state_dict)
+
+    # Check outputs on an image
+    if is_semantic:
+        image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False)
+        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
+        image = Image.open(ds[0]["file"])
+    else:
+        image_processor = BeitImageProcessor(
+            size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
+        )
+        image = prepare_img()
+
+    encoding = image_processor(images=image, return_tensors="pt")
+    pixel_values = encoding["pixel_values"]
+
+    outputs = model(pixel_values)
+    logits = outputs.logits
+
+    # verify logits
+    expected_shape = torch.Size([1, 1000])
+    if checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k"):
+        expected_shape = torch.Size([1, 196, 8192])
+    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k"):
+        expected_shape = torch.Size([1, 196, 8192])
+    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22k"):
+        expected_shape = torch.Size([1, 21841])
+        expected_logits = torch.tensor([2.2288, 2.4671, 0.7395])
+        expected_class_idx = 2397
+    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22k"):
+        expected_shape = torch.Size([1, 21841])
+        expected_logits = torch.tensor([1.6881, -0.2787, 0.5901])
+        expected_class_idx = 2396
+    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft1k"):
+        expected_logits = torch.tensor([0.1241, 0.0798, -0.6569])
+        expected_class_idx = 285
+    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22kto1k"):
+        expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108])
+        expected_class_idx = 281
+    elif checkpoint_url[:-4].endswith("beit_base_patch16_384_pt22k_ft22kto1k"):
+        expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147])
+        expected_class_idx = 761
+    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft1k"):
+        expected_logits = torch.tensor([0.4610, -0.0928, 0.2086])
+        expected_class_idx = 761
+    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22kto1k"):
+        expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837])
+        expected_class_idx = 761
+    elif checkpoint_url[:-4].endswith("beit_large_patch16_384_pt22k_ft22kto1k"):
+        expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]])
+        expected_class_idx = 761
+    elif checkpoint_url[:-4].endswith("beit_large_patch16_512_pt22k_ft22kto1k"):
+        expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852])
+        expected_class_idx = 761
+    elif checkpoint_url[:-4].endswith("beit_base_patch16_640_pt22k_ft22ktoade20k"):
+        expected_shape = (1, 150, 160, 160)
+        expected_logits = torch.tensor(
+            [
+                [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
+                [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
+                [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
+            ]
+        )
+    elif checkpoint_url[:-4].endswith("beit_large_patch16_640_pt22k_ft22ktoade20k"):
+        expected_shape = (1, 150, 160, 160)
+        expected_logits = torch.tensor(
+            [
+                [[-4.3305, -2.3049, -3.0161], [-2.9591, -1.5305, -2.2251], [-3.4198, -1.8004, -2.9062]],
+                [[-5.8922, -3.7435, -4.3978], [-4.2063, -2.7872, -3.4755], [-4.2791, -3.1874, -4.1681]],
+                [[0.9895, 4.3467, 4.7663], [4.2476, 5.6830, 6.1518], [4.5550, 6.2495, 6.5154]],
+            ]
+        )
+    else:
+        raise ValueError("Can't verify logits as model is not supported")
+
+    if logits.shape != expected_shape:
+        raise ValueError(f"Shape of logits not as expected. {logits.shape=}, {expected_shape=}")
+    if not has_lm_head:
+        if is_semantic:
+            if not torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-3):
+                raise ValueError("First elements of logits not as expected")
+        else:
+            print("Predicted class idx:", logits.argmax(-1).item())
+
+            if not torch.allclose(logits[0, :3], expected_logits, atol=1e-3):
+                raise ValueError("First elements of logits not as expected")
+            if logits.argmax(-1).item() != expected_class_idx:
+                raise ValueError("Predicted class index not as expected")
+
+    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+    print(f"Saving model to {pytorch_dump_folder_path}")
+    model.save_pretrained(pytorch_dump_folder_path)
+    print(f"Saving image processor to {pytorch_dump_folder_path}")
+    image_processor.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--checkpoint_url",
+        default="https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth",
+        type=str,
+        help="URL to the original PyTorch checkpoint (.pth file).",
+    )
+    parser.add_argument(
+        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+    )
+    args = parser.parse_args()
+    convert_beit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for BEiT."""

import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
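Since `BeitFeatureExtractor` above is only a deprecated alias of `BeitImageProcessor`, migrating is a rename. A minimal sketch, assuming both classes are exported at the top level of `transformers` (the checkpoint name is used purely as an example):

from transformers import BeitFeatureExtractor, BeitImageProcessor

# Legacy name: still functional, but __init__ emits a FutureWarning.
feature_extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-patch16-224")

# Preferred name: identical behaviour, no warning.
image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")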
deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py
ADDED
|
@@ -0,0 +1,507 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Beit."""

import warnings
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class BeitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a BEiT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
            is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
            `preprocess` method.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
            Can be overridden by the `crop_size` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            The mean to use if normalizing the image. This is a float or list of floats of length of the number of
            channels of the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            The standard deviation to use if normalizing the image. This is a float or list of floats of length of the
            number of channels of the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_reduce_labels (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
            used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
            background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
            `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_reduce_labels: bool = False,
        **kwargs,
    ) -> None:
        if "reduce_labels" in kwargs:
            warnings.warn(
                "The `reduce_labels` parameter is deprecated and will be removed in a future version. Please use"
                " `do_reduce_labels` instead.",
                FutureWarning,
            )
            do_reduce_labels = kwargs.pop("reduce_labels")
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.do_reduce_labels = do_reduce_labels

    @classmethod
    def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
        """
        Overrides the `from_dict` method from the base class to make sure `reduce_labels` is updated if the image
        processor is created using `from_dict` and kwargs, e.g.
        `BeitImageProcessor.from_pretrained(checkpoint, reduce_labels=True)`.
        """
        image_processor_dict = image_processor_dict.copy()
        if "reduce_labels" in kwargs:
            image_processor_dict["reduce_labels"] = kwargs.pop("reduce_labels")
        return super().from_dict(image_processor_dict, **kwargs)

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to (size["height"], size["width"]).

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PIL.Image.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        size = get_size_dict(size, default_to_square=True, param_name="size")
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` argument must contain `height` and `width` keys. Got {size.keys()}")
        return resize(
            image,
            size=(size["height"], size["width"]),
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def reduce_label(self, label: ImageInput) -> np.ndarray:
        label = to_numpy_array(label)
        # Avoid using underflow conversion
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
        return label

    def _preprocess(
        self,
        image: ImageInput,
        do_reduce_labels: bool = None,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        if do_reduce_labels:
            image = self.reduce_label(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

        if do_center_crop:
            image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)

        return image

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if is_scaled_image(image) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(image)
        image = self._preprocess(
            image,
            do_reduce_labels=False,
            do_resize=do_resize,
            size=size,
            resample=resample,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            input_data_format=input_data_format,
        )
        if data_format is not None:
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
        return image

    def _preprocess_segmentation_map(
        self,
        segmentation_map: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_reduce_labels: bool = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Preprocesses a single segmentation map."""
        # All transformations expect numpy arrays.
        segmentation_map = to_numpy_array(segmentation_map)
        # Add an axis to the segmentation maps for transformations.
        if segmentation_map.ndim == 2:
            segmentation_map = segmentation_map[None, ...]
            added_dimension = True
            input_data_format = ChannelDimension.FIRST
        else:
            added_dimension = False
            if input_data_format is None:
                input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
        segmentation_map = self._preprocess(
            image=segmentation_map,
            do_reduce_labels=do_reduce_labels,
            do_resize=do_resize,
            resample=resample,
            size=size,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_normalize=False,
            do_rescale=False,
            input_data_format=ChannelDimension.FIRST,
        )
        # Remove extra axis if added
        if added_dimension:
            segmentation_map = np.squeeze(segmentation_map, axis=0)
        segmentation_map = segmentation_map.astype(np.int64)
        return segmentation_map

    def __call__(self, images, segmentation_maps=None, **kwargs):
        # Overrides the `__call__` method of the `Preprocessor` class such that the images and segmentation maps can
        # both be passed in as positional arguments.
        return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        segmentation_maps: Optional[ImageInput] = None,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_reduce_labels: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
                padded with zeros and then cropped.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
                Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
                is used for background, and background itself is not included in all classes of a dataset (e.g.
                ADE20k). The background label will be replaced by 255.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True, param_name="size")
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels

        images = make_list_of_images(images)

        if segmentation_maps is not None:
            segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)

        if segmentation_maps is not None and not valid_images(segmentation_maps):
            raise ValueError(
                "Invalid segmentation_maps type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        images = [
            self._preprocess_image(
                image=img,
                do_resize=do_resize,
                do_center_crop=do_center_crop,
                do_rescale=do_rescale,
                do_normalize=do_normalize,
                resample=resample,
                size=size,
                rescale_factor=rescale_factor,
                crop_size=crop_size,
                image_mean=image_mean,
                image_std=image_std,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for img in images
        ]

        data = {"pixel_values": images}

        if segmentation_maps is not None:
            segmentation_maps = [
                self._preprocess_segmentation_map(
                    segmentation_map=segmentation_map,
                    do_reduce_labels=do_reduce_labels,
                    do_resize=do_resize,
                    resample=resample,
                    size=size,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                )
                for segmentation_map in segmentation_maps
            ]
            data["labels"] = segmentation_maps

        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """
        Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.

        Args:
            outputs ([`BeitForSemanticSegmentation`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
                List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
                predictions will not be resized.

        Returns:
            semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
        """
        # TODO: add support for other frameworks
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
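To show how `preprocess`/`__call__` and `post_process_semantic_segmentation` above fit together, here is a rough usage sketch; the ADE20k-finetuned checkpoint name and the local image path are assumptions for illustration only:

from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor

image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")

image = Image.open("scene.jpg")  # hypothetical local image
inputs = image_processor(images=image, return_tensors="pt")  # resize, center-crop, rescale, normalize
outputs = model(**inputs)

# Upsample the logits back to the original (height, width) and take the per-pixel argmax.
segmentation_map = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]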
deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py
ADDED
|
@@ -0,0 +1,1427 @@
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" PyTorch BEiT model."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import collections.abc
|
| 19 |
+
import math
|
| 20 |
+
from dataclasses import dataclass
|
| 21 |
+
from typing import List, Optional, Tuple, Union
|
| 22 |
+
|
| 23 |
+
import torch
|
| 24 |
+
import torch.utils.checkpoint
|
| 25 |
+
from torch import Tensor, nn
|
| 26 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 27 |
+
|
| 28 |
+
from ...activations import ACT2FN
|
| 29 |
+
from ...modeling_outputs import (
|
| 30 |
+
BackboneOutput,
|
| 31 |
+
BaseModelOutput,
|
| 32 |
+
BaseModelOutputWithPooling,
|
| 33 |
+
ImageClassifierOutput,
|
| 34 |
+
MaskedLMOutput,
|
| 35 |
+
SemanticSegmenterOutput,
|
| 36 |
+
)
|
| 37 |
+
from ...modeling_utils import PreTrainedModel
|
| 38 |
+
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
|
| 39 |
+
from ...utils import (
|
| 40 |
+
add_code_sample_docstrings,
|
| 41 |
+
add_start_docstrings,
|
| 42 |
+
add_start_docstrings_to_model_forward,
|
| 43 |
+
logging,
|
| 44 |
+
replace_return_docstrings,
|
| 45 |
+
)
|
| 46 |
+
from ...utils.backbone_utils import BackboneMixin
|
| 47 |
+
from .configuration_beit import BeitConfig
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
logger = logging.get_logger(__name__)
|
| 51 |
+
|
| 52 |
+
# General docstring
|
| 53 |
+
_CONFIG_FOR_DOC = "BeitConfig"
|
| 54 |
+
|
| 55 |
+
# Base docstring
|
| 56 |
+
_CHECKPOINT_FOR_DOC = "microsoft/beit-base-patch16-224-pt22k"
|
| 57 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
|
| 58 |
+
|
| 59 |
+
# Image classification docstring
|
| 60 |
+
_IMAGE_CLASS_CHECKPOINT = "microsoft/beit-base-patch16-224"
|
| 61 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
| 62 |
+
|
| 63 |
+
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
| 64 |
+
"microsoft/beit-base-patch16-224",
|
| 65 |
+
# See all BEiT models at https://huggingface.co/models?filter=beit
|
| 66 |
+
]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@dataclass
|
| 70 |
+
class BeitModelOutputWithPooling(BaseModelOutputWithPooling):
|
| 71 |
+
"""
|
| 72 |
+
Class for outputs of [`BeitModel`].
|
| 73 |
+
|
| 74 |
+
Args:
|
| 75 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 76 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 77 |
+
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
|
| 78 |
+
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
|
| 79 |
+
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
|
| 80 |
+
will be returned.
|
| 81 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 82 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
|
| 83 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
| 84 |
+
|
| 85 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
| 86 |
+
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
| 87 |
+
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
| 88 |
+
sequence_length)`.
|
| 89 |
+
|
| 90 |
+
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
| 91 |
+
heads.
|
| 92 |
+
"""
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
|
| 96 |
+
"""
|
| 97 |
+
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
| 98 |
+
|
| 99 |
+
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
|
| 100 |
+
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
|
| 101 |
+
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
|
| 102 |
+
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
|
| 103 |
+
argument.
|
| 104 |
+
"""
|
| 105 |
+
if drop_prob == 0.0 or not training:
|
| 106 |
+
return input
|
| 107 |
+
keep_prob = 1 - drop_prob
|
| 108 |
+
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
| 109 |
+
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
|
| 110 |
+
random_tensor.floor_() # binarize
|
| 111 |
+
output = input.div(keep_prob) * random_tensor
|
| 112 |
+
return output
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class BeitDropPath(nn.Module):
|
| 116 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
|
| 117 |
+
|
| 118 |
+
def __init__(self, drop_prob: Optional[float] = None) -> None:
|
| 119 |
+
super().__init__()
|
| 120 |
+
self.drop_prob = drop_prob
|
| 121 |
+
|
| 122 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 123 |
+
return drop_path(hidden_states, self.drop_prob, self.training)
|
| 124 |
+
|
| 125 |
+
def extra_repr(self) -> str:
|
| 126 |
+
return "p={}".format(self.drop_prob)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
# Based on timm implementation, which can be found here:
|
| 130 |
+
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
|
| 131 |
+
class BeitEmbeddings(nn.Module):
|
| 132 |
+
"""
|
| 133 |
+
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
|
| 134 |
+
|
| 135 |
+
"""
|
| 136 |
+
|
| 137 |
+
def __init__(self, config: BeitConfig) -> None:
|
| 138 |
+
super().__init__()
|
| 139 |
+
|
| 140 |
+
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
|
| 141 |
+
if config.use_mask_token:
|
| 142 |
+
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
|
| 143 |
+
else:
|
| 144 |
+
self.mask_token = None
|
| 145 |
+
self.patch_embeddings = BeitPatchEmbeddings(config)
|
| 146 |
+
num_patches = self.patch_embeddings.num_patches
|
| 147 |
+
if config.use_absolute_position_embeddings:
|
| 148 |
+
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
|
| 149 |
+
else:
|
| 150 |
+
self.position_embeddings = None
|
| 151 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 152 |
+
|
| 153 |
+
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None) -> torch.Tensor:
|
| 154 |
+
embeddings, (patch_height, patch_width) = self.patch_embeddings(
|
| 155 |
+
pixel_values, self.position_embeddings[:, 1:, :] if self.position_embeddings is not None else None
|
| 156 |
+
)
|
| 157 |
+
batch_size, seq_len, _ = embeddings.size()
|
| 158 |
+
|
| 159 |
+
if bool_masked_pos is not None:
|
| 160 |
+
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
|
| 161 |
+
# replace the masked visual tokens by mask_tokens
|
| 162 |
+
w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
|
| 163 |
+
embeddings = embeddings * (1 - w) + mask_tokens * w
|
| 164 |
+
|
| 165 |
+
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
|
| 166 |
+
if self.position_embeddings is not None:
|
| 167 |
+
cls_tokens = cls_tokens + self.position_embeddings[:, :1, :]
|
| 168 |
+
|
| 169 |
+
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
|
| 170 |
+
|
| 171 |
+
embeddings = self.dropout(embeddings)
|
| 172 |
+
|
| 173 |
+
return embeddings, (patch_height, patch_width)
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
class BeitPatchEmbeddings(nn.Module):
|
| 177 |
+
"""
|
| 178 |
+
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
|
| 179 |
+
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
|
| 180 |
+
Transformer.
|
| 181 |
+
"""
|
| 182 |
+
|
| 183 |
+
def __init__(self, config):
|
| 184 |
+
super().__init__()
|
| 185 |
+
image_size, patch_size = config.image_size, config.patch_size
|
| 186 |
+
num_channels, hidden_size = config.num_channels, config.hidden_size
|
| 187 |
+
|
| 188 |
+
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
|
| 189 |
+
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
|
| 190 |
+
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
|
| 191 |
+
patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
|
| 192 |
+
self.image_size = image_size
|
| 193 |
+
self.patch_size = patch_size
|
| 194 |
+
self.num_channels = num_channels
|
| 195 |
+
self.num_patches = num_patches
|
| 196 |
+
self.patch_shape = patch_shape
|
| 197 |
+
|
| 198 |
+
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
|
| 199 |
+
|
| 200 |
+
def forward(self, pixel_values: torch.Tensor, position_embedding: Optional[torch.Tensor] = None) -> torch.Tensor:
|
| 201 |
+
batch_size, num_channels, height, width = pixel_values.shape
|
| 202 |
+
if num_channels != self.num_channels:
|
| 203 |
+
raise ValueError(
|
| 204 |
+
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
embeddings = self.projection(pixel_values)
|
| 208 |
+
patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]
|
| 209 |
+
|
| 210 |
+
if position_embedding is not None:
|
| 211 |
+
# interpolate the position embedding to the corresponding size
|
| 212 |
+
position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(
|
| 213 |
+
0, 3, 1, 2
|
| 214 |
+
)
|
| 215 |
+
position_embedding = nn.functional.interpolate(
|
| 216 |
+
position_embedding, size=(patch_height, patch_width), mode="bicubic"
|
| 217 |
+
)
|
| 218 |
+
embeddings = embeddings + position_embedding
|
| 219 |
+
|
| 220 |
+
embeddings = embeddings.flatten(2).transpose(1, 2)
|
| 221 |
+
|
| 222 |
+
return embeddings, (patch_height, patch_width)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
class BeitSelfAttention(nn.Module):
|
| 226 |
+
def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
|
| 227 |
+
super().__init__()
|
| 228 |
+
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
| 229 |
+
raise ValueError(
|
| 230 |
+
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
|
| 231 |
+
f"heads {config.num_attention_heads}."
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
self.num_attention_heads = config.num_attention_heads
|
| 235 |
+
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
| 236 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
| 237 |
+
|
| 238 |
+
self.query = nn.Linear(config.hidden_size, self.all_head_size)
|
| 239 |
+
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
|
| 240 |
+
self.value = nn.Linear(config.hidden_size, self.all_head_size)
|
| 241 |
+
|
| 242 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
| 243 |
+
|
| 244 |
+
if window_size:
|
| 245 |
+
self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
|
| 246 |
+
else:
|
| 247 |
+
self.relative_position_bias = None
|
| 248 |
+
|
| 249 |
+
def transpose_for_scores(self, x):
|
| 250 |
+
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
| 251 |
+
x = x.view(*new_x_shape)
|
| 252 |
+
return x.permute(0, 2, 1, 3)
|
| 253 |
+
|
| 254 |
+
def forward(
|
| 255 |
+
self,
|
| 256 |
+
hidden_states: torch.Tensor,
|
| 257 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 258 |
+
output_attentions: bool = False,
|
| 259 |
+
relative_position_bias: Optional["BeitRelativePositionBias"] = None,
|
| 260 |
+
) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
|
| 261 |
+
mixed_query_layer = self.query(hidden_states)
|
| 262 |
+
|
| 263 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
| 264 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
| 265 |
+
query_layer = self.transpose_for_scores(mixed_query_layer)
|
| 266 |
+
|
| 267 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
| 268 |
+
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
| 269 |
+
|
| 270 |
+
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
| 271 |
+
|
| 272 |
+
# Add relative position bias if present.
|
| 273 |
+
if self.relative_position_bias is not None:
|
| 274 |
+
attention_scores = attention_scores + self.relative_position_bias().unsqueeze(0)
|
| 275 |
+
|
| 276 |
+
# Add shared relative position bias if provided.
|
| 277 |
+
if relative_position_bias is not None:
|
| 278 |
+
attention_scores = attention_scores + relative_position_bias
|
| 279 |
+
|
| 280 |
+
# Normalize the attention scores to probabilities.
|
| 281 |
+
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
| 282 |
+
|
| 283 |
+
# This is actually dropping out entire tokens to attend to, which might
|
| 284 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
| 285 |
+
attention_probs = self.dropout(attention_probs)
|
| 286 |
+
|
| 287 |
+
# Mask heads if we want to
|
| 288 |
+
if head_mask is not None:
|
| 289 |
+
attention_probs = attention_probs * head_mask
|
| 290 |
+
|
| 291 |
+
context_layer = torch.matmul(attention_probs, value_layer)
|
| 292 |
+
|
| 293 |
+
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
| 294 |
+
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
| 295 |
+
context_layer = context_layer.view(*new_context_layer_shape)
|
| 296 |
+
|
| 297 |
+
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
| 298 |
+
|
| 299 |
+
return outputs
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
class BeitSelfOutput(nn.Module):
|
| 303 |
+
"""
|
| 304 |
+
The residual connection is defined in BeitLayer instead of here (as is the case with other models), due to the
|
| 305 |
+
layernorm applied before each block.
|
| 306 |
+
"""
|
| 307 |
+
|
| 308 |
+
def __init__(self, config: BeitConfig) -> None:
|
| 309 |
+
super().__init__()
|
| 310 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 311 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 312 |
+
|
| 313 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
|
| 314 |
+
hidden_states = self.dense(hidden_states)
|
| 315 |
+
hidden_states = self.dropout(hidden_states)
|
| 316 |
+
|
| 317 |
+
return hidden_states
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class BeitAttention(nn.Module):
|
| 321 |
+
def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
|
| 322 |
+
super().__init__()
|
| 323 |
+
self.attention = BeitSelfAttention(config, window_size=window_size)
|
| 324 |
+
self.output = BeitSelfOutput(config)
|
| 325 |
+
self.pruned_heads = set()
|
| 326 |
+
|
| 327 |
+
def prune_heads(self, heads):
|
| 328 |
+
if len(heads) == 0:
|
| 329 |
+
return
|
| 330 |
+
heads, index = find_pruneable_heads_and_indices(
|
| 331 |
+
heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
|
| 332 |
+
)
|
| 333 |
+
|
| 334 |
+
# Prune linear layers
|
| 335 |
+
self.attention.query = prune_linear_layer(self.attention.query, index)
|
| 336 |
+
self.attention.key = prune_linear_layer(self.attention.key, index)
|
| 337 |
+
self.attention.value = prune_linear_layer(self.attention.value, index)
|
| 338 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
| 339 |
+
|
| 340 |
+
# Update hyper params and store pruned heads
|
| 341 |
+
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
|
| 342 |
+
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
|
| 343 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
| 344 |
+
|
| 345 |
+
def forward(
|
| 346 |
+
self,
|
| 347 |
+
hidden_states: torch.Tensor,
|
| 348 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 349 |
+
output_attentions: bool = False,
|
| 350 |
+
relative_position_bias: Optional["BeitRelativePositionBias"] = None,
|
| 351 |
+
) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
|
| 352 |
+
self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias)
|
| 353 |
+
|
| 354 |
+
attention_output = self.output(self_outputs[0], hidden_states)
|
| 355 |
+
|
| 356 |
+
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
| 357 |
+
return outputs
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
class BeitIntermediate(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states


class BeitOutput(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class BeitLayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BeitAttention(config, window_size=window_size)
        self.intermediate = BeitIntermediate(config)
        self.output = BeitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.drop_path = BeitDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        init_values = config.layer_scale_init_value
        if init_values > 0:
            self.lambda_1 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
            self.lambda_2 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
        else:
            self.lambda_1, self.lambda_2 = None, None

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional["BeitRelativePositionBias"] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in BEiT, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
            relative_position_bias=relative_position_bias,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # apply lambda_1 if present
        if self.lambda_1 is not None:
            attention_output = self.lambda_1 * attention_output

        # first residual connection
        hidden_states = self.drop_path(attention_output) + hidden_states

        # in BEiT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)

        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output)

        if self.lambda_2 is not None:
            layer_output = self.lambda_2 * layer_output

        # second residual connection
        layer_output = self.drop_path(layer_output) + hidden_states

        outputs = (layer_output,) + outputs

        return outputs


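The `lambda_1`/`lambda_2` parameters in `BeitLayer` implement LayerScale on top of the pre-LayerNorm residual pattern. A minimal sketch (not part of the original file) of that pattern, using placeholder sub-modules instead of the real attention and MLP blocks:

```python
# Sketch of the pre-LN residual pattern: y = x + drop_path(lambda_1 * attn(LN(x)));
# z = y + drop_path(lambda_2 * mlp(LN(y))). `attn`, `mlp` and all sizes are placeholders.
import torch
from torch import nn

hidden_size, init_value = 768, 0.1
ln1, ln2 = nn.LayerNorm(hidden_size), nn.LayerNorm(hidden_size)
attn = nn.Linear(hidden_size, hidden_size)  # stand-in for self-attention
mlp = nn.Linear(hidden_size, hidden_size)   # stand-in for the MLP block
lambda_1 = nn.Parameter(init_value * torch.ones(hidden_size))
lambda_2 = nn.Parameter(init_value * torch.ones(hidden_size))

x = torch.randn(2, 197, hidden_size)
y = x + lambda_1 * attn(ln1(x))  # first residual branch (drop_path treated as identity)
z = y + lambda_2 * mlp(ln2(y))   # second residual branch
print(z.shape)  # torch.Size([2, 197, 768])
```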
class BeitRelativePositionBias(nn.Module):
    def __init__(self, config: BeitConfig, window_size: tuple) -> None:
        super().__init__()
        self.window_size = window_size
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, config.num_attention_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token to cls & cls to cls

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = torch.zeros(
            size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
        )
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1

        self.register_buffer("relative_position_index", relative_position_index, persistent=False)

    def forward(self) -> torch.Tensor:
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1
        )  # Wh*Ww,Wh*Ww,nH

        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww


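A tiny sketch (not part of the original file) of the relative-position bookkeeping above for a hypothetical 2x2 window, showing where the `+ 3` extra slots for cls-to-token, token-to-cls and cls-to-cls come from:

```python
# Distinct relative offsets along each axis: 2*W - 1; the "+ 3" reserves slots for the [CLS] token.
import torch

window = (2, 2)
num_relative_distance = (2 * window[0] - 1) * (2 * window[1] - 1) + 3  # 9 + 3 = 12
coords = torch.stack(torch.meshgrid(torch.arange(window[0]), torch.arange(window[1]), indexing="ij"))
flat = torch.flatten(coords, 1)            # (2, 4)
rel = flat[:, :, None] - flat[:, None, :]  # (2, 4, 4)
rel = rel.permute(1, 2, 0)                 # (4, 4, 2)
rel[:, :, 0] += window[0] - 1              # shift offsets to start from 0
rel[:, :, 1] += window[1] - 1
rel[:, :, 0] *= 2 * window[1] - 1
print(num_relative_distance, rel.sum(-1).unique())  # 12, token-to-token indices all lie in [0, 8]
```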
class BeitEncoder(nn.Module):
    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        self.config = config
        if config.use_shared_relative_position_bias:
            self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
        else:
            self.relative_position_bias = None

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layer = nn.ModuleList(
            [
                BeitLayer(
                    config,
                    window_size=window_size if config.use_relative_position_bias else None,
                    drop_path_rate=dpr[i],
                )
                for i in range(config.num_hidden_layers)
            ]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                relative_position_bias = (
                    self.relative_position_bias() if self.relative_position_bias is not None else None
                )
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


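The "stochastic depth decay rule" used in `BeitEncoder.__init__` simply spaces the drop-path probabilities linearly across layers. A short sketch (not part of the original file) with assumed values:

```python
# Drop-path probability grows linearly from 0 in the first layer to drop_path_rate in the last one.
import torch

num_hidden_layers, drop_path_rate = 12, 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_hidden_layers)]
print([round(p, 3) for p in dpr])  # [0.0, 0.009, 0.018, ..., 0.1]
```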
class BeitPreTrainedModel(PreTrainedModel):
|
| 559 |
+
"""
|
| 560 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 561 |
+
models.
|
| 562 |
+
"""
|
| 563 |
+
|
| 564 |
+
config_class = BeitConfig
|
| 565 |
+
base_model_prefix = "beit"
|
| 566 |
+
main_input_name = "pixel_values"
|
| 567 |
+
supports_gradient_checkpointing = True
|
| 568 |
+
|
| 569 |
+
def _init_weights(self, module):
|
| 570 |
+
"""Initialize the weights"""
|
| 571 |
+
if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
|
| 572 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
| 573 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
| 574 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
| 575 |
+
if module.bias is not None:
|
| 576 |
+
module.bias.data.zero_()
|
| 577 |
+
elif isinstance(module, nn.Embedding):
|
| 578 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
| 579 |
+
if module.padding_idx is not None:
|
| 580 |
+
module.weight.data[module.padding_idx].zero_()
|
| 581 |
+
elif isinstance(module, nn.LayerNorm):
|
| 582 |
+
module.bias.data.zero_()
|
| 583 |
+
module.weight.data.fill_(1.0)
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
BEIT_START_DOCSTRING = r"""
|
| 587 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
|
| 588 |
+
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
|
| 589 |
+
behavior.
|
| 590 |
+
|
| 591 |
+
Parameters:
|
| 592 |
+
config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
|
| 593 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 594 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 595 |
+
"""
|
| 596 |
+
|
| 597 |
+
BEIT_INPUTS_DOCSTRING = r"""
|
| 598 |
+
Args:
|
| 599 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 600 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
| 601 |
+
[`BeitImageProcessor.__call__`] for details.
|
| 602 |
+
|
| 603 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 604 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 605 |
+
|
| 606 |
+
- 1 indicates the head is **not masked**,
|
| 607 |
+
- 0 indicates the head is **masked**.
|
| 608 |
+
|
| 609 |
+
output_attentions (`bool`, *optional*):
|
| 610 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 611 |
+
tensors for more detail.
|
| 612 |
+
output_hidden_states (`bool`, *optional*):
|
| 613 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 614 |
+
more detail.
|
| 615 |
+
return_dict (`bool`, *optional*):
|
| 616 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 617 |
+
"""
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
@add_start_docstrings(
|
| 621 |
+
"The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
|
| 622 |
+
BEIT_START_DOCSTRING,
|
| 623 |
+
)
|
| 624 |
+
class BeitModel(BeitPreTrainedModel):
|
| 625 |
+
def __init__(self, config: BeitConfig, add_pooling_layer: bool = True) -> None:
|
| 626 |
+
super().__init__(config)
|
| 627 |
+
self.config = config
|
| 628 |
+
|
| 629 |
+
self.embeddings = BeitEmbeddings(config)
|
| 630 |
+
self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
|
| 631 |
+
|
| 632 |
+
self.layernorm = (
|
| 633 |
+
nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 634 |
+
)
|
| 635 |
+
self.pooler = BeitPooler(config) if add_pooling_layer else None
|
| 636 |
+
|
| 637 |
+
# Initialize weights and apply final processing
|
| 638 |
+
self.post_init()
|
| 639 |
+
|
| 640 |
+
def get_input_embeddings(self):
|
| 641 |
+
return self.embeddings.patch_embeddings
|
| 642 |
+
|
| 643 |
+
def _prune_heads(self, heads_to_prune):
|
| 644 |
+
"""
|
| 645 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
| 646 |
+
class PreTrainedModel
|
| 647 |
+
"""
|
| 648 |
+
for layer, heads in heads_to_prune.items():
|
| 649 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
| 650 |
+
|
| 651 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
| 652 |
+
@add_code_sample_docstrings(
|
| 653 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 654 |
+
output_type=BeitModelOutputWithPooling,
|
| 655 |
+
config_class=_CONFIG_FOR_DOC,
|
| 656 |
+
modality="vision",
|
| 657 |
+
expected_output=_EXPECTED_OUTPUT_SHAPE,
|
| 658 |
+
)
|
| 659 |
+
def forward(
|
| 660 |
+
self,
|
| 661 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 662 |
+
bool_masked_pos: Optional[torch.BoolTensor] = None,
|
| 663 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 664 |
+
output_attentions: Optional[bool] = None,
|
| 665 |
+
output_hidden_states: Optional[bool] = None,
|
| 666 |
+
return_dict: Optional[bool] = None,
|
| 667 |
+
) -> Union[tuple, BeitModelOutputWithPooling]:
|
| 668 |
+
r"""
|
| 669 |
+
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
|
| 670 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
|
| 671 |
+
"""
|
| 672 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 673 |
+
output_hidden_states = (
|
| 674 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 675 |
+
)
|
| 676 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 677 |
+
|
| 678 |
+
if pixel_values is None:
|
| 679 |
+
raise ValueError("You have to specify pixel_values")
|
| 680 |
+
|
| 681 |
+
# Prepare head mask if needed
|
| 682 |
+
# 1.0 in head_mask indicate we keep the head
|
| 683 |
+
# attention_probs has shape bsz x n_heads x N x N
|
| 684 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
| 685 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
| 686 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
| 687 |
+
|
| 688 |
+
embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values, bool_masked_pos)
|
| 689 |
+
|
| 690 |
+
encoder_outputs = self.encoder(
|
| 691 |
+
embedding_output,
|
| 692 |
+
head_mask=head_mask,
|
| 693 |
+
output_attentions=output_attentions,
|
| 694 |
+
output_hidden_states=output_hidden_states,
|
| 695 |
+
return_dict=return_dict,
|
| 696 |
+
)
|
| 697 |
+
sequence_output = encoder_outputs[0]
|
| 698 |
+
sequence_output = self.layernorm(sequence_output)
|
| 699 |
+
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
| 700 |
+
|
| 701 |
+
if not return_dict:
|
| 702 |
+
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
|
| 703 |
+
return head_outputs + encoder_outputs[1:]
|
| 704 |
+
|
| 705 |
+
return BeitModelOutputWithPooling(
|
| 706 |
+
last_hidden_state=sequence_output,
|
| 707 |
+
pooler_output=pooled_output,
|
| 708 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 709 |
+
attentions=encoder_outputs.attentions,
|
| 710 |
+
)
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
class BeitPooler(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.layernorm = (
            nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if self.layernorm is not None:
            # Mean pool the final hidden states of the patch tokens
            patch_tokens = hidden_states[:, 1:, :]
            pooled_output = self.layernorm(patch_tokens.mean(1))
        else:
            # Pool by simply taking the final hidden state of the [CLS] token
            pooled_output = hidden_states[:, 0]

        return pooled_output


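A small sketch (not part of the original file, using dummy tensors) of the two pooling strategies `BeitPooler` switches between:

```python
# Index 0 is the [CLS] token; the remaining positions are patch tokens.
import torch

hidden_states = torch.randn(2, 197, 768)       # (batch, 1 + num_patches, hidden)
mean_pooled = hidden_states[:, 1:, :].mean(1)  # average of patch tokens (use_mean_pooling=True)
cls_pooled = hidden_states[:, 0]               # final hidden state of [CLS] (use_mean_pooling=False)
print(mean_pooled.shape, cls_pooled.shape)     # torch.Size([2, 768]) torch.Size([2, 768])
```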
@add_start_docstrings(
|
| 733 |
+
"""Beit Model transformer with a 'language' modeling head on top. BEiT does masked image modeling by predicting
|
| 734 |
+
visual tokens of a Vector-Quantize Variational Autoencoder (VQ-VAE), whereas other vision models like ViT and DeiT
|
| 735 |
+
predict RGB pixel values. As a result, this class is incompatible with [`AutoModelForMaskedImageModeling`], so you
|
| 736 |
+
will need to use [`BeitForMaskedImageModeling`] directly if you wish to do masked image modeling with BEiT.""",
|
| 737 |
+
BEIT_START_DOCSTRING,
|
| 738 |
+
)
|
| 739 |
+
class BeitForMaskedImageModeling(BeitPreTrainedModel):
|
| 740 |
+
def __init__(self, config: BeitConfig) -> None:
|
| 741 |
+
super().__init__(config)
|
| 742 |
+
|
| 743 |
+
self.num_labels = config.num_labels
|
| 744 |
+
self.beit = BeitModel(config, add_pooling_layer=False)
|
| 745 |
+
|
| 746 |
+
# Classifier head
|
| 747 |
+
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 748 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
|
| 749 |
+
|
| 750 |
+
# Initialize weights and apply final processing
|
| 751 |
+
self.post_init()
|
| 752 |
+
|
| 753 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
| 754 |
+
@replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
|
| 755 |
+
def forward(
|
| 756 |
+
self,
|
| 757 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 758 |
+
bool_masked_pos: Optional[torch.BoolTensor] = None,
|
| 759 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 760 |
+
labels: Optional[torch.Tensor] = None,
|
| 761 |
+
output_attentions: Optional[bool] = None,
|
| 762 |
+
output_hidden_states: Optional[bool] = None,
|
| 763 |
+
return_dict: Optional[bool] = None,
|
| 764 |
+
) -> Union[tuple, MaskedLMOutput]:
|
| 765 |
+
r"""
|
| 766 |
+
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
|
| 767 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
|
| 768 |
+
|
| 769 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 770 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
| 771 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 772 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 773 |
+
|
| 774 |
+
Returns:
|
| 775 |
+
|
| 776 |
+
Examples:
|
| 777 |
+
|
| 778 |
+
```python
|
| 779 |
+
>>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling
|
| 780 |
+
>>> import torch
|
| 781 |
+
>>> from PIL import Image
|
| 782 |
+
>>> import requests
|
| 783 |
+
|
| 784 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 785 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 786 |
+
|
| 787 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
|
| 788 |
+
>>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
|
| 789 |
+
|
| 790 |
+
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
|
| 791 |
+
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
|
| 792 |
+
>>> # create random boolean mask of shape (batch_size, num_patches)
|
| 793 |
+
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
|
| 794 |
+
|
| 795 |
+
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
|
| 796 |
+
>>> loss, logits = outputs.loss, outputs.logits
|
| 797 |
+
>>> list(logits.shape)
|
| 798 |
+
[1, 196, 8192]
|
| 799 |
+
```"""
|
| 800 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 801 |
+
|
| 802 |
+
outputs = self.beit(
|
| 803 |
+
pixel_values,
|
| 804 |
+
bool_masked_pos=bool_masked_pos,
|
| 805 |
+
head_mask=head_mask,
|
| 806 |
+
output_attentions=output_attentions,
|
| 807 |
+
output_hidden_states=output_hidden_states,
|
| 808 |
+
return_dict=return_dict,
|
| 809 |
+
)
|
| 810 |
+
|
| 811 |
+
sequence_output = outputs[0]
|
| 812 |
+
sequence_output = self.layernorm(sequence_output)
|
| 813 |
+
prediction_scores = self.lm_head(sequence_output[:, 1:])
|
| 814 |
+
|
| 815 |
+
masked_lm_loss = None
|
| 816 |
+
if labels is not None:
|
| 817 |
+
loss_fct = CrossEntropyLoss() # -100 index = padding token
|
| 818 |
+
masked_lm_loss = loss_fct(prediction_scores[bool_masked_pos], labels)
|
| 819 |
+
|
| 820 |
+
if not return_dict:
|
| 821 |
+
output = (prediction_scores,) + outputs[1:]
|
| 822 |
+
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
|
| 823 |
+
|
| 824 |
+
return MaskedLMOutput(
|
| 825 |
+
loss=masked_lm_loss,
|
| 826 |
+
logits=prediction_scores,
|
| 827 |
+
hidden_states=outputs.hidden_states,
|
| 828 |
+
attentions=outputs.attentions,
|
| 829 |
+
)
|
| 830 |
+
|
| 831 |
+
|
| 832 |
+
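A sketch (not part of the original file) of how the masked-image-modeling loss above scores only the masked patches, via boolean indexing of the logits; the sizes, the random mask, and the label layout are assumptions for illustration:

```python
# Mirrors `loss_fct(prediction_scores[bool_masked_pos], labels)` from the forward pass above.
import torch
from torch.nn import CrossEntropyLoss

batch, num_patches, vocab_size = 1, 196, 8192
prediction_scores = torch.randn(batch, num_patches, vocab_size)
bool_masked_pos = torch.randint(0, 2, (batch, num_patches)).bool()
labels = torch.randint(0, vocab_size, (int(bool_masked_pos.sum()),))  # one visual-token id per masked patch

loss = CrossEntropyLoss()(prediction_scores[bool_masked_pos], labels)
print(prediction_scores[bool_masked_pos].shape, loss.shape)  # (num_masked, 8192) and a scalar
```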
@add_start_docstrings(
|
| 833 |
+
"""
|
| 834 |
+
Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
|
| 835 |
+
hidden states of the patch tokens) e.g. for ImageNet.
|
| 836 |
+
""",
|
| 837 |
+
BEIT_START_DOCSTRING,
|
| 838 |
+
)
|
| 839 |
+
class BeitForImageClassification(BeitPreTrainedModel):
|
| 840 |
+
def __init__(self, config: BeitConfig) -> None:
|
| 841 |
+
super().__init__(config)
|
| 842 |
+
|
| 843 |
+
self.num_labels = config.num_labels
|
| 844 |
+
self.beit = BeitModel(config, add_pooling_layer=True)
|
| 845 |
+
|
| 846 |
+
# Classifier head
|
| 847 |
+
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
|
| 848 |
+
|
| 849 |
+
# Initialize weights and apply final processing
|
| 850 |
+
self.post_init()
|
| 851 |
+
|
| 852 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
| 853 |
+
@add_code_sample_docstrings(
|
| 854 |
+
checkpoint=_IMAGE_CLASS_CHECKPOINT,
|
| 855 |
+
output_type=ImageClassifierOutput,
|
| 856 |
+
config_class=_CONFIG_FOR_DOC,
|
| 857 |
+
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
|
| 858 |
+
)
|
| 859 |
+
def forward(
|
| 860 |
+
self,
|
| 861 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 862 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 863 |
+
labels: Optional[torch.Tensor] = None,
|
| 864 |
+
output_attentions: Optional[bool] = None,
|
| 865 |
+
output_hidden_states: Optional[bool] = None,
|
| 866 |
+
return_dict: Optional[bool] = None,
|
| 867 |
+
) -> Union[tuple, ImageClassifierOutput]:
|
| 868 |
+
r"""
|
| 869 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 870 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
| 871 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 872 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 873 |
+
"""
|
| 874 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 875 |
+
outputs = self.beit(
|
| 876 |
+
pixel_values,
|
| 877 |
+
head_mask=head_mask,
|
| 878 |
+
output_attentions=output_attentions,
|
| 879 |
+
output_hidden_states=output_hidden_states,
|
| 880 |
+
return_dict=return_dict,
|
| 881 |
+
)
|
| 882 |
+
|
| 883 |
+
pooled_output = outputs.pooler_output if return_dict else outputs[1]
|
| 884 |
+
|
| 885 |
+
logits = self.classifier(pooled_output)
|
| 886 |
+
|
| 887 |
+
loss = None
|
| 888 |
+
if labels is not None:
|
| 889 |
+
if self.config.problem_type is None:
|
| 890 |
+
if self.num_labels == 1:
|
| 891 |
+
self.config.problem_type = "regression"
|
| 892 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
| 893 |
+
self.config.problem_type = "single_label_classification"
|
| 894 |
+
else:
|
| 895 |
+
self.config.problem_type = "multi_label_classification"
|
| 896 |
+
|
| 897 |
+
if self.config.problem_type == "regression":
|
| 898 |
+
loss_fct = MSELoss()
|
| 899 |
+
if self.num_labels == 1:
|
| 900 |
+
loss = loss_fct(logits.squeeze(), labels.squeeze())
|
| 901 |
+
else:
|
| 902 |
+
loss = loss_fct(logits, labels)
|
| 903 |
+
elif self.config.problem_type == "single_label_classification":
|
| 904 |
+
loss_fct = CrossEntropyLoss()
|
| 905 |
+
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
| 906 |
+
elif self.config.problem_type == "multi_label_classification":
|
| 907 |
+
loss_fct = BCEWithLogitsLoss()
|
| 908 |
+
loss = loss_fct(logits, labels)
|
| 909 |
+
if not return_dict:
|
| 910 |
+
output = (logits,) + outputs[2:]
|
| 911 |
+
return ((loss,) + output) if loss is not None else output
|
| 912 |
+
|
| 913 |
+
return ImageClassifierOutput(
|
| 914 |
+
loss=loss,
|
| 915 |
+
logits=logits,
|
| 916 |
+
hidden_states=outputs.hidden_states,
|
| 917 |
+
attentions=outputs.attentions,
|
| 918 |
+
)
|
| 919 |
+
|
| 920 |
+
|
| 921 |
+
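A hedged end-to-end usage sketch (not part of the original file) for `BeitForImageClassification`; the checkpoint name is illustrative, and any BEiT classification checkpoint should work the same way:

```python
from PIL import Image
import requests
import torch
from transformers import AutoImageProcessor, BeitForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])
```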
class BeitConvModule(nn.Module):
|
| 922 |
+
"""
|
| 923 |
+
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
|
| 924 |
+
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
|
| 925 |
+
|
| 926 |
+
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
|
| 927 |
+
"""
|
| 928 |
+
|
| 929 |
+
def __init__(
|
| 930 |
+
self,
|
| 931 |
+
in_channels: int,
|
| 932 |
+
out_channels: int,
|
| 933 |
+
kernel_size: Union[int, Tuple[int, int]],
|
| 934 |
+
padding: Union[int, Tuple[int, int], str] = 0,
|
| 935 |
+
bias: bool = False,
|
| 936 |
+
dilation: Union[int, Tuple[int, int]] = 1,
|
| 937 |
+
) -> None:
|
| 938 |
+
super().__init__()
|
| 939 |
+
self.conv = nn.Conv2d(
|
| 940 |
+
in_channels=in_channels,
|
| 941 |
+
out_channels=out_channels,
|
| 942 |
+
kernel_size=kernel_size,
|
| 943 |
+
padding=padding,
|
| 944 |
+
bias=bias,
|
| 945 |
+
dilation=dilation,
|
| 946 |
+
)
|
| 947 |
+
self.bn = nn.BatchNorm2d(out_channels)
|
| 948 |
+
self.activation = nn.ReLU()
|
| 949 |
+
|
| 950 |
+
def forward(self, input: torch.Tensor) -> torch.Tensor:
|
| 951 |
+
output = self.conv(input)
|
| 952 |
+
output = self.bn(output)
|
| 953 |
+
output = self.activation(output)
|
| 954 |
+
|
| 955 |
+
return output
|
| 956 |
+
|
| 957 |
+
|
| 958 |
+
class BeitPyramidPoolingBlock(nn.Module):
|
| 959 |
+
def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
|
| 960 |
+
super().__init__()
|
| 961 |
+
self.layers = [
|
| 962 |
+
nn.AdaptiveAvgPool2d(pool_scale),
|
| 963 |
+
BeitConvModule(in_channels, channels, kernel_size=1),
|
| 964 |
+
]
|
| 965 |
+
for i, layer in enumerate(self.layers):
|
| 966 |
+
self.add_module(str(i), layer)
|
| 967 |
+
|
| 968 |
+
def forward(self, input: torch.Tensor) -> torch.Tensor:
|
| 969 |
+
hidden_state = input
|
| 970 |
+
for layer in self.layers:
|
| 971 |
+
hidden_state = layer(hidden_state)
|
| 972 |
+
return hidden_state
|
| 973 |
+
|
| 974 |
+
|
| 975 |
+
class BeitPyramidPoolingModule(nn.Module):
|
| 976 |
+
"""
|
| 977 |
+
Pyramid Pooling Module (PPM) used in PSPNet.
|
| 978 |
+
|
| 979 |
+
Args:
|
| 980 |
+
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
|
| 981 |
+
Module.
|
| 982 |
+
in_channels (int): Input channels.
|
| 983 |
+
channels (int): Channels after modules, before conv_seg.
|
| 984 |
+
align_corners (bool): align_corners argument of F.interpolate.
|
| 985 |
+
|
| 986 |
+
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
|
| 987 |
+
"""
|
| 988 |
+
|
| 989 |
+
def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
|
| 990 |
+
super().__init__()
|
| 991 |
+
self.pool_scales = pool_scales
|
| 992 |
+
self.align_corners = align_corners
|
| 993 |
+
self.in_channels = in_channels
|
| 994 |
+
self.channels = channels
|
| 995 |
+
self.blocks = []
|
| 996 |
+
for i, pool_scale in enumerate(pool_scales):
|
| 997 |
+
block = BeitPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
|
| 998 |
+
self.blocks.append(block)
|
| 999 |
+
self.add_module(str(i), block)
|
| 1000 |
+
|
| 1001 |
+
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
|
| 1002 |
+
ppm_outs = []
|
| 1003 |
+
for ppm in self.blocks:
|
| 1004 |
+
ppm_out = ppm(x)
|
| 1005 |
+
upsampled_ppm_out = nn.functional.interpolate(
|
| 1006 |
+
ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
|
| 1007 |
+
)
|
| 1008 |
+
ppm_outs.append(upsampled_ppm_out)
|
| 1009 |
+
return ppm_outs
|
| 1010 |
+
|
| 1011 |
+
|
| 1012 |
+
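A sketch (not part of the original file) of the pyramid pooling idea used by `BeitPyramidPoolingModule`: pool the feature map to a few grid sizes, project, and upsample everything back so the outputs can be concatenated. The layer sizes here are illustrative only:

```python
import torch
from torch import nn

x = torch.randn(1, 768, 40, 40)              # (batch, channels, H, W) feature map
outs = [x]
for scale in (1, 2, 3, 6):
    pooled = nn.AdaptiveAvgPool2d(scale)(x)  # (1, 768, scale, scale)
    pooled = nn.Conv2d(768, 768, kernel_size=1)(pooled)
    outs.append(nn.functional.interpolate(pooled, size=x.shape[2:], mode="bilinear", align_corners=False))
fused = torch.cat(outs, dim=1)
print(fused.shape)  # torch.Size([1, 3840, 40, 40]) -> 768 channels x (1 input + 4 pooled scales)
```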
class BeitUperHead(nn.Module):
|
| 1013 |
+
"""
|
| 1014 |
+
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
|
| 1015 |
+
[UPerNet](https://arxiv.org/abs/1807.10221).
|
| 1016 |
+
|
| 1017 |
+
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
|
| 1018 |
+
"""
|
| 1019 |
+
|
| 1020 |
+
def __init__(self, config: BeitConfig) -> None:
|
| 1021 |
+
super().__init__()
|
| 1022 |
+
|
| 1023 |
+
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
|
| 1024 |
+
self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
|
| 1025 |
+
self.channels = config.hidden_size
|
| 1026 |
+
self.align_corners = False
|
| 1027 |
+
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
|
| 1028 |
+
|
| 1029 |
+
# PSP Module
|
| 1030 |
+
self.psp_modules = BeitPyramidPoolingModule(
|
| 1031 |
+
self.pool_scales,
|
| 1032 |
+
self.in_channels[-1],
|
| 1033 |
+
self.channels,
|
| 1034 |
+
align_corners=self.align_corners,
|
| 1035 |
+
)
|
| 1036 |
+
self.bottleneck = BeitConvModule(
|
| 1037 |
+
self.in_channels[-1] + len(self.pool_scales) * self.channels,
|
| 1038 |
+
self.channels,
|
| 1039 |
+
kernel_size=3,
|
| 1040 |
+
padding=1,
|
| 1041 |
+
)
|
| 1042 |
+
# FPN Module
|
| 1043 |
+
self.lateral_convs = nn.ModuleList()
|
| 1044 |
+
self.fpn_convs = nn.ModuleList()
|
| 1045 |
+
for in_channels in self.in_channels[:-1]: # skip the top layer
|
| 1046 |
+
l_conv = BeitConvModule(in_channels, self.channels, kernel_size=1)
|
| 1047 |
+
fpn_conv = BeitConvModule(self.channels, self.channels, kernel_size=3, padding=1)
|
| 1048 |
+
self.lateral_convs.append(l_conv)
|
| 1049 |
+
self.fpn_convs.append(fpn_conv)
|
| 1050 |
+
|
| 1051 |
+
self.fpn_bottleneck = BeitConvModule(
|
| 1052 |
+
len(self.in_channels) * self.channels,
|
| 1053 |
+
self.channels,
|
| 1054 |
+
kernel_size=3,
|
| 1055 |
+
padding=1,
|
| 1056 |
+
)
|
| 1057 |
+
|
| 1058 |
+
def psp_forward(self, inputs):
|
| 1059 |
+
x = inputs[-1]
|
| 1060 |
+
psp_outs = [x]
|
| 1061 |
+
psp_outs.extend(self.psp_modules(x))
|
| 1062 |
+
psp_outs = torch.cat(psp_outs, dim=1)
|
| 1063 |
+
output = self.bottleneck(psp_outs)
|
| 1064 |
+
|
| 1065 |
+
return output
|
| 1066 |
+
|
| 1067 |
+
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
|
| 1068 |
+
# build laterals
|
| 1069 |
+
laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
|
| 1070 |
+
|
| 1071 |
+
laterals.append(self.psp_forward(encoder_hidden_states))
|
| 1072 |
+
|
| 1073 |
+
# build top-down path
|
| 1074 |
+
used_backbone_levels = len(laterals)
|
| 1075 |
+
for i in range(used_backbone_levels - 1, 0, -1):
|
| 1076 |
+
prev_shape = laterals[i - 1].shape[2:]
|
| 1077 |
+
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
|
| 1078 |
+
laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
|
| 1079 |
+
)
|
| 1080 |
+
|
| 1081 |
+
# build outputs
|
| 1082 |
+
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
|
| 1083 |
+
# append psp feature
|
| 1084 |
+
fpn_outs.append(laterals[-1])
|
| 1085 |
+
|
| 1086 |
+
for i in range(used_backbone_levels - 1, 0, -1):
|
| 1087 |
+
fpn_outs[i] = nn.functional.interpolate(
|
| 1088 |
+
fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
|
| 1089 |
+
)
|
| 1090 |
+
fpn_outs = torch.cat(fpn_outs, dim=1)
|
| 1091 |
+
output = self.fpn_bottleneck(fpn_outs)
|
| 1092 |
+
output = self.classifier(output)
|
| 1093 |
+
|
| 1094 |
+
return output
|
| 1095 |
+
|
| 1096 |
+
|
| 1097 |
+
class BeitFCNHead(nn.Module):
    """
    Fully Convolutional Network for Semantic Segmentation. This head is the implementation of
    [FCN](https://arxiv.org/abs/1411.4038).

    Args:
        config (BeitConfig): Configuration.
        in_index (int): Index of the encoder feature map to use. Default: 2.
        kernel_size (int): The kernel size for convs in the head. Default: 3.
        dilation (int): The dilation rate for convs in the head. Default: 1.

    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

def __init__(
|
| 1113 |
+
self, config: BeitConfig, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
|
| 1114 |
+
) -> None:
|
| 1115 |
+
super().__init__()
|
| 1116 |
+
self.in_channels = config.hidden_size
|
| 1117 |
+
self.channels = config.auxiliary_channels
|
| 1118 |
+
self.num_convs = config.auxiliary_num_convs
|
| 1119 |
+
self.concat_input = config.auxiliary_concat_input
|
| 1120 |
+
self.in_index = in_index
|
| 1121 |
+
|
| 1122 |
+
conv_padding = (kernel_size // 2) * dilation
|
| 1123 |
+
convs = []
|
| 1124 |
+
convs.append(
|
| 1125 |
+
BeitConvModule(
|
| 1126 |
+
self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
|
| 1127 |
+
)
|
| 1128 |
+
)
|
| 1129 |
+
for i in range(self.num_convs - 1):
|
| 1130 |
+
convs.append(
|
| 1131 |
+
BeitConvModule(
|
| 1132 |
+
self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
|
| 1133 |
+
)
|
| 1134 |
+
)
|
| 1135 |
+
if self.num_convs == 0:
|
| 1136 |
+
self.convs = nn.Identity()
|
| 1137 |
+
else:
|
| 1138 |
+
self.convs = nn.Sequential(*convs)
|
| 1139 |
+
if self.concat_input:
|
| 1140 |
+
self.conv_cat = BeitConvModule(
|
| 1141 |
+
self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
|
| 1142 |
+
)
|
| 1143 |
+
|
| 1144 |
+
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
|
| 1145 |
+
|
| 1146 |
+
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
|
| 1147 |
+
# just take the relevant feature maps
|
| 1148 |
+
hidden_states = encoder_hidden_states[self.in_index]
|
| 1149 |
+
output = self.convs(hidden_states)
|
| 1150 |
+
if self.concat_input:
|
| 1151 |
+
output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
|
| 1152 |
+
output = self.classifier(output)
|
| 1153 |
+
return output
|
| 1154 |
+
|
| 1155 |
+
|
| 1156 |
+
@add_start_docstrings(
|
| 1157 |
+
"""
|
| 1158 |
+
Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
|
| 1159 |
+
""",
|
| 1160 |
+
BEIT_START_DOCSTRING,
|
| 1161 |
+
)
|
| 1162 |
+
class BeitForSemanticSegmentation(BeitPreTrainedModel):
|
| 1163 |
+
def __init__(self, config: BeitConfig) -> None:
|
| 1164 |
+
super().__init__(config)
|
| 1165 |
+
|
| 1166 |
+
self.num_labels = config.num_labels
|
| 1167 |
+
self.beit = BeitModel(config, add_pooling_layer=False)
|
| 1168 |
+
|
| 1169 |
+
# FPNs
|
| 1170 |
+
if len(self.config.out_indices) != 4:
|
| 1171 |
+
raise ValueError(
|
| 1172 |
+
"BeitForSemanticSegmentation requires config.out_indices to be a list of 4 integers, "
|
| 1173 |
+
"specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
|
| 1174 |
+
"a base-sized architecture."
|
| 1175 |
+
)
|
| 1176 |
+
self.fpn1 = nn.Sequential(
|
| 1177 |
+
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
|
| 1178 |
+
nn.BatchNorm2d(config.hidden_size),
|
| 1179 |
+
nn.GELU(),
|
| 1180 |
+
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
|
| 1181 |
+
)
|
| 1182 |
+
self.fpn2 = nn.Sequential(
|
| 1183 |
+
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
|
| 1184 |
+
)
|
| 1185 |
+
self.fpn3 = nn.Identity()
|
| 1186 |
+
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
|
| 1187 |
+
|
| 1188 |
+
# Semantic segmentation head(s)
|
| 1189 |
+
self.decode_head = BeitUperHead(config)
|
| 1190 |
+
self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None
|
| 1191 |
+
|
| 1192 |
+
# Initialize weights and apply final processing
|
| 1193 |
+
self.post_init()
|
| 1194 |
+
|
| 1195 |
+
def compute_loss(self, logits, auxiliary_logits, labels):
|
| 1196 |
+
# upsample logits to the images' original size
|
| 1197 |
+
upsampled_logits = nn.functional.interpolate(
|
| 1198 |
+
logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
|
| 1199 |
+
)
|
| 1200 |
+
if auxiliary_logits is not None:
|
| 1201 |
+
upsampled_auxiliary_logits = nn.functional.interpolate(
|
| 1202 |
+
auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
|
| 1203 |
+
)
|
| 1204 |
+
# compute weighted loss
|
| 1205 |
+
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
|
| 1206 |
+
main_loss = loss_fct(upsampled_logits, labels)
|
| 1207 |
+
loss = main_loss
|
| 1208 |
+
if auxiliary_logits is not None:
|
| 1209 |
+
auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
|
| 1210 |
+
loss += self.config.auxiliary_loss_weight * auxiliary_loss
|
| 1211 |
+
|
| 1212 |
+
return loss
|
| 1213 |
+
|
| 1214 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
| 1215 |
+
@replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
|
| 1216 |
+
def forward(
|
| 1217 |
+
self,
|
| 1218 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 1219 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 1220 |
+
labels: Optional[torch.Tensor] = None,
|
| 1221 |
+
output_attentions: Optional[bool] = None,
|
| 1222 |
+
output_hidden_states: Optional[bool] = None,
|
| 1223 |
+
return_dict: Optional[bool] = None,
|
| 1224 |
+
) -> Union[tuple, SemanticSegmenterOutput]:
|
| 1225 |
+
r"""
|
| 1226 |
+
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
|
| 1227 |
+
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
|
| 1228 |
+
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
|
| 1229 |
+
|
| 1230 |
+
Returns:
|
| 1231 |
+
|
| 1232 |
+
Examples:
|
| 1233 |
+
|
| 1234 |
+
```python
|
| 1235 |
+
>>> from transformers import AutoImageProcessor, BeitForSemanticSegmentation
|
| 1236 |
+
>>> from PIL import Image
|
| 1237 |
+
>>> import requests
|
| 1238 |
+
|
| 1239 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1240 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1241 |
+
|
| 1242 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
|
| 1243 |
+
>>> model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
|
| 1244 |
+
|
| 1245 |
+
>>> inputs = image_processor(images=image, return_tensors="pt")
|
| 1246 |
+
>>> outputs = model(**inputs)
|
| 1247 |
+
>>> # logits are of shape (batch_size, num_labels, height, width)
|
| 1248 |
+
>>> logits = outputs.logits
|
| 1249 |
+
```"""
|
| 1250 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1251 |
+
output_hidden_states = (
|
| 1252 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1253 |
+
)
|
| 1254 |
+
|
| 1255 |
+
outputs = self.beit(
|
| 1256 |
+
pixel_values,
|
| 1257 |
+
head_mask=head_mask,
|
| 1258 |
+
output_attentions=output_attentions,
|
| 1259 |
+
output_hidden_states=True, # we need the intermediate hidden states
|
| 1260 |
+
return_dict=return_dict,
|
| 1261 |
+
)
|
| 1262 |
+
|
| 1263 |
+
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
|
| 1264 |
+
|
| 1265 |
+
# only keep certain features, and reshape
|
| 1266 |
+
# note that we do +1 as the encoder_hidden_states also includes the initial embeddings
|
| 1267 |
+
features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
|
| 1268 |
+
batch_size = pixel_values.shape[0]
|
| 1269 |
+
patch_resolution = self.config.image_size // self.config.patch_size
|
| 1270 |
+
features = [
|
| 1271 |
+
x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features
|
| 1272 |
+
]
|
| 1273 |
+
|
| 1274 |
+
# apply FPNs
|
| 1275 |
+
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
|
| 1276 |
+
for i in range(len(features)):
|
| 1277 |
+
features[i] = ops[i](features[i])
|
| 1278 |
+
|
| 1279 |
+
logits = self.decode_head(features)
|
| 1280 |
+
|
| 1281 |
+
auxiliary_logits = None
|
| 1282 |
+
if self.auxiliary_head is not None:
|
| 1283 |
+
auxiliary_logits = self.auxiliary_head(features)
|
| 1284 |
+
|
| 1285 |
+
loss = None
|
| 1286 |
+
if labels is not None:
|
| 1287 |
+
if self.config.num_labels == 1:
|
| 1288 |
+
raise ValueError("The number of labels should be greater than one")
|
| 1289 |
+
else:
|
| 1290 |
+
loss = self.compute_loss(logits, auxiliary_logits, labels)
|
| 1291 |
+
|
| 1292 |
+
if not return_dict:
|
| 1293 |
+
if output_hidden_states:
|
| 1294 |
+
output = (logits,) + outputs[1:]
|
| 1295 |
+
else:
|
| 1296 |
+
output = (logits,) + outputs[2:]
|
| 1297 |
+
return ((loss,) + output) if loss is not None else output
|
| 1298 |
+
|
| 1299 |
+
return SemanticSegmenterOutput(
|
| 1300 |
+
loss=loss,
|
| 1301 |
+
logits=logits,
|
| 1302 |
+
hidden_states=outputs.hidden_states if output_hidden_states else None,
|
| 1303 |
+
attentions=outputs.attentions,
|
| 1304 |
+
)
|
| 1305 |
+
|
| 1306 |
+
|
| 1307 |
+
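A sketch (not part of the original file) of the token-to-feature-map reshape that `BeitForSemanticSegmentation.forward` applies before the FPN and decode heads; the sizes assume a 640x640 input with 16x16 patches:

```python
# Drop the [CLS] token, move the hidden dimension to the channel axis, and fold the sequence back
# into a (patch_resolution x patch_resolution) grid.
import torch

batch_size, hidden_size, image_size, patch_size = 1, 768, 640, 16
patch_resolution = image_size // patch_size                              # 40
tokens = torch.randn(batch_size, 1 + patch_resolution**2, hidden_size)  # [CLS] + patch tokens

feature_map = tokens[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution)
print(feature_map.shape)  # torch.Size([1, 768, 40, 40])
```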
@add_start_docstrings(
|
| 1308 |
+
"""
|
| 1309 |
+
BEiT backbone, to be used with frameworks like DETR and MaskFormer.
|
| 1310 |
+
""",
|
| 1311 |
+
BEIT_START_DOCSTRING,
|
| 1312 |
+
)
|
| 1313 |
+
class BeitBackbone(BeitPreTrainedModel, BackboneMixin):
|
| 1314 |
+
def __init__(self, config):
|
| 1315 |
+
super().__init__(config)
|
| 1316 |
+
super()._init_backbone(config)
|
| 1317 |
+
|
| 1318 |
+
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
|
| 1319 |
+
self.embeddings = BeitEmbeddings(config)
|
| 1320 |
+
self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
|
| 1321 |
+
|
| 1322 |
+
if config.add_fpn:
|
| 1323 |
+
if len(self.config.out_indices) != 4:
|
| 1324 |
+
raise ValueError(
|
| 1325 |
+
"BeitBackbone requires config.out_indices to be a list of 4 integers, "
|
| 1326 |
+
"specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
|
| 1327 |
+
"a base-sized architecture."
|
| 1328 |
+
)
|
| 1329 |
+
hidden_size = config.hidden_size
|
| 1330 |
+
self.fpn1 = nn.Sequential(
|
| 1331 |
+
nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
|
| 1332 |
+
nn.BatchNorm2d(hidden_size, eps=config.batch_norm_eps),
|
| 1333 |
+
nn.GELU(),
|
| 1334 |
+
nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
|
| 1335 |
+
)
|
| 1336 |
+
|
| 1337 |
+
self.fpn2 = nn.Sequential(nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2))
|
| 1338 |
+
self.fpn3 = nn.Identity()
|
| 1339 |
+
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
|
| 1340 |
+
|
| 1341 |
+
# initialize weights and apply final processing
|
| 1342 |
+
self.post_init()
|
| 1343 |
+
|
| 1344 |
+
def get_input_embeddings(self):
|
| 1345 |
+
return self.embeddings.patch_embeddings
|
| 1346 |
+
|
| 1347 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
| 1348 |
+
@replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
|
| 1349 |
+
def forward(
|
| 1350 |
+
self,
|
| 1351 |
+
pixel_values: Tensor,
|
| 1352 |
+
output_hidden_states: Optional[bool] = None,
|
| 1353 |
+
output_attentions: Optional[bool] = None,
|
| 1354 |
+
return_dict: Optional[bool] = None,
|
| 1355 |
+
) -> BackboneOutput:
|
| 1356 |
+
"""
|
| 1357 |
+
Returns:
|
| 1358 |
+
|
| 1359 |
+
Examples:
|
| 1360 |
+
|
| 1361 |
+
```python
|
| 1362 |
+
>>> from transformers import AutoImageProcessor, AutoBackbone
|
| 1363 |
+
>>> import torch
|
| 1364 |
+
>>> from PIL import Image
|
| 1365 |
+
>>> import requests
|
| 1366 |
+
|
| 1367 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 1368 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1369 |
+
|
| 1370 |
+
>>> processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
|
| 1371 |
+
>>> model = AutoBackbone.from_pretrained(
|
| 1372 |
+
... "microsoft/beit-base-patch16-224", out_features=["stage1", "stage2", "stage3", "stage4"]
|
| 1373 |
+
... )
|
| 1374 |
+
|
| 1375 |
+
>>> inputs = processor(image, return_tensors="pt")
|
| 1376 |
+
|
| 1377 |
+
>>> outputs = model(**inputs)
|
| 1378 |
+
>>> feature_maps = outputs.feature_maps
|
| 1379 |
+
>>> list(feature_maps[-1].shape)
|
| 1380 |
+
[1, 768, 14, 14]
|
| 1381 |
+
```"""
|
| 1382 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1383 |
+
output_hidden_states = (
|
| 1384 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1385 |
+
)
|
| 1386 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1387 |
+
|
| 1388 |
+
batch_size = pixel_values.shape[0]
|
| 1389 |
+
embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values)
|
| 1390 |
+
|
| 1391 |
+
outputs = self.encoder(
|
| 1392 |
+
embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
|
| 1393 |
+
)
|
| 1394 |
+
|
| 1395 |
+
hidden_states = outputs.hidden_states if return_dict else outputs[1]
|
| 1396 |
+
|
| 1397 |
+
feature_maps = ()
|
| 1398 |
+
for stage, hidden_state in zip(self.stage_names, hidden_states):
|
| 1399 |
+
if stage in self.out_features:
|
| 1400 |
+
if self.config.reshape_hidden_states:
|
| 1401 |
+
hidden_state = hidden_state[:, 1:, :]
|
| 1402 |
+
hidden_state = hidden_state.permute(0, 2, 1)
|
| 1403 |
+
hidden_state = hidden_state.reshape(batch_size, -1, patch_height, patch_width)
|
| 1404 |
+
|
| 1405 |
+
feature_maps += (hidden_state,)
|
| 1406 |
+
|
| 1407 |
+
if self.config.add_fpn:
|
| 1408 |
+
feature_maps = [
|
| 1409 |
+
self.fpn1(feature_maps[0]),
|
| 1410 |
+
self.fpn2(feature_maps[1]),
|
| 1411 |
+
self.fpn3(feature_maps[2]),
|
| 1412 |
+
self.fpn4(feature_maps[3]),
|
| 1413 |
+
]
|
| 1414 |
+
feature_maps = tuple(feature_maps)
|
| 1415 |
+
|
| 1416 |
+
if not return_dict:
|
| 1417 |
+
if output_hidden_states:
|
| 1418 |
+
output = (feature_maps,) + outputs[1:]
|
| 1419 |
+
else:
|
| 1420 |
+
output = (feature_maps,) + outputs[2:]
|
| 1421 |
+
return output
|
| 1422 |
+
|
| 1423 |
+
return BackboneOutput(
|
| 1424 |
+
feature_maps=feature_maps,
|
| 1425 |
+
hidden_states=outputs.hidden_states if output_hidden_states else None,
|
| 1426 |
+
attentions=outputs.attentions,
|
| 1427 |
+
)
|
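Before moving on to the Flax port, a sketch (not part of the original file) of what the four FPN branches in `BeitBackbone` (and `BeitForSemanticSegmentation`) do to a single 1/16-resolution ViT feature map; shapes assume a 224x224 input:

```python
# fpn1 upsamples 4x, fpn2 upsamples 2x, fpn3 keeps the resolution, fpn4 downsamples 2x,
# producing a small pyramid at 1/4, 1/8, 1/16 and 1/32 of the input image.
import torch
from torch import nn

hidden_size = 768
feat = torch.randn(1, hidden_size, 14, 14)  # 224 / 16 = 14

fpn1 = nn.Sequential(
    nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
    nn.BatchNorm2d(hidden_size),
    nn.GELU(),
    nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
)
fpn2 = nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2)
fpn3 = nn.Identity()
fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)

for op in (fpn1, fpn2, fpn3, fpn4):
    print(op(feat).shape)  # 56x56, 28x28, 14x14, 7x7
```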
deepseekvl2/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py
ADDED
|
@@ -0,0 +1,948 @@
# coding=utf-8
# Copyright 2021 Microsoft Research and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import Callable, List, Optional, Tuple

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict

from ...modeling_flax_outputs import (
    FlaxBaseModelOutput,
    FlaxBaseModelOutputWithPooling,
    FlaxMaskedLMOutput,
    FlaxSequenceClassifierOutput,
)
from ...modeling_flax_utils import (
    ACT2FN,
    FlaxPreTrainedModel,
    append_replace_return_docstrings,
    overwrite_call_docstring,
)
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward
from .configuration_beit import BeitConfig


@flax.struct.dataclass
class FlaxBeitModelOutputWithPooling(FlaxBaseModelOutputWithPooling):
    """
    Class for outputs of [`FlaxBeitModel`].

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
            Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
            *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
            will be returned.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
            the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """


BEIT_START_DOCSTRING = r"""

    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading, saving and converting weights from PyTorch models)

    This model is also a
    [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
    a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
    behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
        dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
            The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
            `jax.numpy.bfloat16` (on TPUs).

            This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
            specified all the computation will be performed with the given `dtype`.

            **Note that this only specifies the dtype of the computation and does not influence the dtype of model
|
| 96 |
+
parameters.**
|
| 97 |
+
|
| 98 |
+
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
|
| 99 |
+
[`~FlaxPreTrainedModel.to_bf16`].
|
| 100 |
+
"""
|
| 101 |
+
|
| 102 |
+
BEIT_INPUTS_DOCSTRING = r"""
|
| 103 |
+
Args:
|
| 104 |
+
pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
|
| 105 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
| 106 |
+
[`AutoImageProcessor.__call__`] for details.
|
| 107 |
+
|
| 108 |
+
output_attentions (`bool`, *optional*):
|
| 109 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 110 |
+
tensors for more detail.
|
| 111 |
+
output_hidden_states (`bool`, *optional*):
|
| 112 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 113 |
+
more detail.
|
| 114 |
+
return_dict (`bool`, *optional*):
|
| 115 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 116 |
+
"""
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def relative_position_index_init(window_size: Tuple[int, int]) -> jnp.ndarray:
|
| 120 |
+
"""
|
| 121 |
+
get pair-wise relative position index for each token inside the window
|
| 122 |
+
"""
|
| 123 |
+
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
|
| 124 |
+
|
| 125 |
+
coords_h = np.arange(window_size[0])
|
| 126 |
+
coords_w = np.arange(window_size[1])
|
| 127 |
+
coords = np.stack(np.meshgrid(coords_h, coords_w, indexing="ij")) # 2, Wh, Ww
|
| 128 |
+
coords_flatten = np.reshape(coords, (2, -1))
|
| 129 |
+
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
| 130 |
+
relative_coords = np.transpose(relative_coords, (1, 2, 0)) # Wh*Ww, Wh*Ww, 2
|
| 131 |
+
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
|
| 132 |
+
relative_coords[:, :, 1] += window_size[1] - 1
|
| 133 |
+
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
|
| 134 |
+
|
| 135 |
+
relative_position_index = np.zeros(shape=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
|
| 136 |
+
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
| 137 |
+
relative_position_index[0, 0:] = num_relative_distance - 3
|
| 138 |
+
relative_position_index[0:, 0] = num_relative_distance - 2
|
| 139 |
+
relative_position_index[0, 0] = num_relative_distance - 1
|
| 140 |
+
return jnp.array(relative_position_index)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def ones_with_scale(key, shape, scale, dtype=jnp.float32):
|
| 144 |
+
return jnp.ones(shape, dtype) * scale
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class FlaxBeitDropPath(nn.Module):
|
| 148 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
|
| 149 |
+
|
| 150 |
+
rate: float
|
| 151 |
+
|
| 152 |
+
@nn.module.compact
|
| 153 |
+
def __call__(self, inputs, deterministic: Optional[bool] = True):
|
| 154 |
+
if self.rate == 0.0:
|
| 155 |
+
return inputs
|
| 156 |
+
keep_prob = 1.0 - self.rate
|
| 157 |
+
if deterministic:
|
| 158 |
+
return inputs
|
| 159 |
+
else:
|
| 160 |
+
shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
| 161 |
+
rng = self.make_rng("droppath")
|
| 162 |
+
random_tensor = keep_prob + jax.random.uniform(rng, shape=shape, dtype=inputs.dtype)
|
| 163 |
+
binary_tensor = jnp.floor(random_tensor)
|
| 164 |
+
output = inputs / keep_prob * binary_tensor
|
| 165 |
+
return output
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
class FlaxBeitPatchEmbeddings(nn.Module):
|
| 169 |
+
config: BeitConfig
|
| 170 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 171 |
+
|
| 172 |
+
def setup(self):
|
| 173 |
+
self.num_channels = self.config.num_channels
|
| 174 |
+
image_size = self.config.image_size
|
| 175 |
+
patch_size = self.config.patch_size
|
| 176 |
+
num_patches = (image_size // patch_size) * (image_size // patch_size)
|
| 177 |
+
patch_shape = (image_size // patch_size, image_size // patch_size)
|
| 178 |
+
self.num_patches = num_patches
|
| 179 |
+
self.patch_shape = patch_shape
|
| 180 |
+
self.projection = nn.Conv(
|
| 181 |
+
self.config.hidden_size,
|
| 182 |
+
kernel_size=(patch_size, patch_size),
|
| 183 |
+
strides=(patch_size, patch_size),
|
| 184 |
+
padding="VALID",
|
| 185 |
+
dtype=self.dtype,
|
| 186 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 187 |
+
)
|
| 188 |
+
|
| 189 |
+
def __call__(self, pixel_values):
|
| 190 |
+
num_channels = pixel_values.shape[-1]
|
| 191 |
+
if num_channels != self.num_channels:
|
| 192 |
+
raise ValueError(
|
| 193 |
+
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
|
| 194 |
+
)
|
| 195 |
+
embeddings = self.projection(pixel_values)
|
| 196 |
+
batch_size, _, _, channels = embeddings.shape
|
| 197 |
+
return jnp.reshape(embeddings, (batch_size, -1, channels))
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class FlaxBeitEmbeddings(nn.Module):
|
| 201 |
+
"""Construct the CLS token, position and patch embeddings."""
|
| 202 |
+
|
| 203 |
+
config: BeitConfig
|
| 204 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 205 |
+
|
| 206 |
+
def setup(self):
|
| 207 |
+
self.cls_token = self.param("cls_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
|
| 208 |
+
if self.config.use_mask_token:
|
| 209 |
+
self.mask_token = self.param("mask_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
|
| 210 |
+
self.patch_embeddings = FlaxBeitPatchEmbeddings(self.config, dtype=self.dtype)
|
| 211 |
+
num_patches = self.patch_embeddings.num_patches
|
| 212 |
+
if self.config.use_absolute_position_embeddings:
|
| 213 |
+
self.position_embeddings = self.param(
|
| 214 |
+
"position_embeddings", nn.initializers.zeros, (1, num_patches + 1, self.config.hidden_size)
|
| 215 |
+
)
|
| 216 |
+
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
|
| 217 |
+
|
| 218 |
+
def __call__(self, pixel_values, bool_masked_pos=None, deterministic=True):
|
| 219 |
+
embeddings = self.patch_embeddings(pixel_values)
|
| 220 |
+
batch_size, seq_len, _ = embeddings.shape
|
| 221 |
+
|
| 222 |
+
cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size))
|
| 223 |
+
cls_tokens = cls_tokens.astype(embeddings.dtype)
|
| 224 |
+
|
| 225 |
+
if bool_masked_pos is not None:
|
| 226 |
+
mask_tokens = jnp.broadcast_to(self.mask_token, (batch_size, seq_len, self.config.hidden_size))
|
| 227 |
+
mask_tokens = mask_tokens.astype(embeddings.dtype)
|
| 228 |
+
# replace the masked visual tokens by mask_tokens
|
| 229 |
+
w = jnp.expand_dims(bool_masked_pos, axis=-1)
|
| 230 |
+
embeddings = embeddings * (1 - w) + mask_tokens * w
|
| 231 |
+
|
| 232 |
+
embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1)
|
| 233 |
+
|
| 234 |
+
if self.config.use_absolute_position_embeddings:
|
| 235 |
+
embeddings = embeddings + self.position_embeddings.astype(embeddings.dtype)
|
| 236 |
+
|
| 237 |
+
embeddings = self.dropout(embeddings, deterministic=deterministic)
|
| 238 |
+
return embeddings
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
class FlaxBeitRelativePositionBias(nn.Module):
|
| 242 |
+
config: BeitConfig
|
| 243 |
+
window_size: Tuple[int, int]
|
| 244 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 245 |
+
|
| 246 |
+
def setup(self):
|
| 247 |
+
num_relative_distance = (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) + 3
|
| 248 |
+
self.relative_position_bias_table = self.param(
|
| 249 |
+
"relative_position_bias_table",
|
| 250 |
+
nn.initializers.zeros,
|
| 251 |
+
(num_relative_distance, self.config.num_attention_heads),
|
| 252 |
+
) # 2*Wh-1 * 2*Ww-1, nH
|
| 253 |
+
# cls to token & token 2 cls & cls to cls
|
| 254 |
+
|
| 255 |
+
self.relative_position_index = relative_position_index_init(self.window_size)
|
| 256 |
+
|
| 257 |
+
def __call__(self):
|
| 258 |
+
index = self.relative_position_index.reshape(-1)
|
| 259 |
+
shape = (self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1)
|
| 260 |
+
relative_position_bias = self.relative_position_bias_table[index].reshape(shape) # Wh*Ww,Wh*Ww,nH
|
| 261 |
+
return jnp.transpose(relative_position_bias, (2, 0, 1))
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
class FlaxBeitSelfAttention(nn.Module):
|
| 265 |
+
config: BeitConfig
|
| 266 |
+
window_size: Tuple[int, int]
|
| 267 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 268 |
+
|
| 269 |
+
def setup(self):
|
| 270 |
+
if self.config.hidden_size % self.config.num_attention_heads != 0 and not hasattr(
|
| 271 |
+
self.config, "embedding_size"
|
| 272 |
+
):
|
| 273 |
+
raise ValueError(
|
| 274 |
+
f"The hidden size {self.config.hidden_size,} is not a multiple of the number of attention "
|
| 275 |
+
f"heads {self.config.num_attention_heads}."
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
self.query = nn.Dense(
|
| 279 |
+
self.config.hidden_size,
|
| 280 |
+
dtype=self.dtype,
|
| 281 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 282 |
+
)
|
| 283 |
+
self.key = nn.Dense(
|
| 284 |
+
self.config.hidden_size,
|
| 285 |
+
dtype=self.dtype,
|
| 286 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 287 |
+
use_bias=False,
|
| 288 |
+
)
|
| 289 |
+
self.value = nn.Dense(
|
| 290 |
+
self.config.hidden_size,
|
| 291 |
+
dtype=self.dtype,
|
| 292 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 293 |
+
)
|
| 294 |
+
|
| 295 |
+
self.relative_position_bias = (
|
| 296 |
+
FlaxBeitRelativePositionBias(self.config, window_size=self.window_size, dtype=self.dtype)
|
| 297 |
+
if self.window_size
|
| 298 |
+
else None
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
def __call__(
|
| 302 |
+
self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
|
| 303 |
+
):
|
| 304 |
+
head_dim = self.config.hidden_size // self.config.num_attention_heads
|
| 305 |
+
|
| 306 |
+
query_states = self.query(hidden_states).reshape(
|
| 307 |
+
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
|
| 308 |
+
)
|
| 309 |
+
value_states = self.value(hidden_states).reshape(
|
| 310 |
+
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
|
| 311 |
+
)
|
| 312 |
+
key_states = self.key(hidden_states).reshape(
|
| 313 |
+
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
dropout_rng = None
|
| 317 |
+
if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
|
| 318 |
+
dropout_rng = self.make_rng("dropout")
|
| 319 |
+
|
| 320 |
+
attention_bias = jnp.array(0.0, dtype=self.dtype)
|
| 321 |
+
# Add relative position bias if present.
|
| 322 |
+
if self.relative_position_bias is not None:
|
| 323 |
+
attention_bias = jnp.expand_dims(self.relative_position_bias(), 0)
|
| 324 |
+
attention_bias = attention_bias.astype(query_states.dtype)
|
| 325 |
+
|
| 326 |
+
# Add shared relative position bias if provided.
|
| 327 |
+
if relative_position_bias is not None:
|
| 328 |
+
attention_bias = attention_bias + relative_position_bias.astype(attention_bias.dtype)
|
| 329 |
+
|
| 330 |
+
attn_weights = dot_product_attention_weights(
|
| 331 |
+
query_states,
|
| 332 |
+
key_states,
|
| 333 |
+
bias=attention_bias,
|
| 334 |
+
dropout_rng=dropout_rng,
|
| 335 |
+
dropout_rate=self.config.attention_probs_dropout_prob,
|
| 336 |
+
broadcast_dropout=True,
|
| 337 |
+
deterministic=deterministic,
|
| 338 |
+
dtype=self.dtype,
|
| 339 |
+
precision=None,
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
|
| 343 |
+
attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
|
| 344 |
+
|
| 345 |
+
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
|
| 346 |
+
return outputs
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
class FlaxBeitSelfOutput(nn.Module):
|
| 350 |
+
config: BeitConfig
|
| 351 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 352 |
+
|
| 353 |
+
def setup(self):
|
| 354 |
+
self.dense = nn.Dense(
|
| 355 |
+
self.config.hidden_size,
|
| 356 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 357 |
+
dtype=self.dtype,
|
| 358 |
+
)
|
| 359 |
+
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
|
| 360 |
+
|
| 361 |
+
def __call__(self, hidden_states, deterministic: bool = True):
|
| 362 |
+
hidden_states = self.dense(hidden_states)
|
| 363 |
+
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
|
| 364 |
+
return hidden_states
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
class FlaxBeitAttention(nn.Module):
|
| 368 |
+
config: BeitConfig
|
| 369 |
+
window_size: Tuple[int, int]
|
| 370 |
+
dtype: jnp.dtype = jnp.float32
|
| 371 |
+
|
| 372 |
+
def setup(self):
|
| 373 |
+
self.attention = FlaxBeitSelfAttention(self.config, self.window_size, dtype=self.dtype)
|
| 374 |
+
self.output = FlaxBeitSelfOutput(self.config, dtype=self.dtype)
|
| 375 |
+
|
| 376 |
+
def __call__(
|
| 377 |
+
self, hidden_states, relative_position_bias=None, deterministic=True, output_attentions: bool = False
|
| 378 |
+
):
|
| 379 |
+
attn_outputs = self.attention(
|
| 380 |
+
hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
|
| 381 |
+
)
|
| 382 |
+
attn_output = attn_outputs[0]
|
| 383 |
+
attn_output = self.output(attn_output, deterministic=deterministic)
|
| 384 |
+
|
| 385 |
+
outputs = (attn_output,)
|
| 386 |
+
|
| 387 |
+
if output_attentions:
|
| 388 |
+
outputs += (attn_outputs[1],)
|
| 389 |
+
|
| 390 |
+
return outputs
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
class FlaxBeitIntermediate(nn.Module):
|
| 394 |
+
config: BeitConfig
|
| 395 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 396 |
+
|
| 397 |
+
def setup(self):
|
| 398 |
+
self.dense = nn.Dense(
|
| 399 |
+
self.config.intermediate_size,
|
| 400 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 401 |
+
dtype=self.dtype,
|
| 402 |
+
)
|
| 403 |
+
self.activation = ACT2FN[self.config.hidden_act]
|
| 404 |
+
|
| 405 |
+
def __call__(self, hidden_states):
|
| 406 |
+
hidden_states = self.dense(hidden_states)
|
| 407 |
+
hidden_states = self.activation(hidden_states)
|
| 408 |
+
|
| 409 |
+
return hidden_states
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
class FlaxBeitOutput(nn.Module):
|
| 413 |
+
config: BeitConfig
|
| 414 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 415 |
+
|
| 416 |
+
def setup(self):
|
| 417 |
+
self.dense = nn.Dense(
|
| 418 |
+
self.config.hidden_size,
|
| 419 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 420 |
+
dtype=self.dtype,
|
| 421 |
+
)
|
| 422 |
+
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
|
| 423 |
+
|
| 424 |
+
def __call__(self, hidden_states, deterministic: bool = True):
|
| 425 |
+
hidden_states = self.dense(hidden_states)
|
| 426 |
+
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
|
| 427 |
+
|
| 428 |
+
return hidden_states
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
class FlaxBeitLayer(nn.Module):
|
| 432 |
+
config: BeitConfig
|
| 433 |
+
window_size: Tuple[int, int]
|
| 434 |
+
drop_path_rate: float
|
| 435 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 436 |
+
|
| 437 |
+
def setup(self):
|
| 438 |
+
self.attention = FlaxBeitAttention(self.config, self.window_size, dtype=self.dtype)
|
| 439 |
+
self.intermediate = FlaxBeitIntermediate(self.config, dtype=self.dtype)
|
| 440 |
+
self.output = FlaxBeitOutput(self.config, dtype=self.dtype)
|
| 441 |
+
self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
|
| 442 |
+
self.drop_path = FlaxBeitDropPath(rate=self.drop_path_rate)
|
| 443 |
+
self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
|
| 444 |
+
|
| 445 |
+
self.init_values = self.config.layer_scale_init_value
|
| 446 |
+
if self.init_values > 0:
|
| 447 |
+
self.lambda_1 = self.param("lambda_1", ones_with_scale, (self.config.hidden_size), self.init_values)
|
| 448 |
+
self.lambda_2 = self.param("lambda_2", ones_with_scale, (self.config.hidden_size), self.init_values)
|
| 449 |
+
else:
|
| 450 |
+
self.lambda_1 = None
|
| 451 |
+
self.lambda_2 = None
|
| 452 |
+
|
| 453 |
+
def __call__(
|
| 454 |
+
self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
|
| 455 |
+
):
|
| 456 |
+
self_attention_outputs = self.attention(
|
| 457 |
+
self.layernorm_before(hidden_states), # in BEiT, layernorm is applied before self-attention
|
| 458 |
+
relative_position_bias,
|
| 459 |
+
deterministic=deterministic,
|
| 460 |
+
output_attentions=output_attentions,
|
| 461 |
+
)
|
| 462 |
+
attention_output = self_attention_outputs[0]
|
| 463 |
+
|
| 464 |
+
# apply lambda_1 if present
|
| 465 |
+
if self.lambda_1 is not None:
|
| 466 |
+
attention_output = self.lambda_1.astype(attention_output.dtype) * attention_output
|
| 467 |
+
|
| 468 |
+
# first residual connection
|
| 469 |
+
hidden_states = self.drop_path(attention_output, deterministic=deterministic) + hidden_states
|
| 470 |
+
|
| 471 |
+
# in BEiT, layernorm is also applied after self-attention
|
| 472 |
+
layer_output = self.layernorm_after(hidden_states)
|
| 473 |
+
|
| 474 |
+
layer_output = self.intermediate(layer_output)
|
| 475 |
+
layer_output = self.output(layer_output, deterministic=deterministic)
|
| 476 |
+
|
| 477 |
+
# apply lambda_2 if present
|
| 478 |
+
if self.lambda_2 is not None:
|
| 479 |
+
layer_output = self.lambda_2.astype(layer_output.dtype) * layer_output
|
| 480 |
+
|
| 481 |
+
# second residual connection
|
| 482 |
+
layer_output = self.drop_path(layer_output, deterministic=deterministic) + hidden_states
|
| 483 |
+
|
| 484 |
+
outputs = (layer_output,)
|
| 485 |
+
|
| 486 |
+
if output_attentions:
|
| 487 |
+
outputs += (self_attention_outputs[1],)
|
| 488 |
+
|
| 489 |
+
return outputs
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
class FlaxBeitLayerCollection(nn.Module):
|
| 493 |
+
config: BeitConfig
|
| 494 |
+
window_size: Tuple[int, int]
|
| 495 |
+
drop_path_rates: List[float]
|
| 496 |
+
relative_position_bias: Callable[[], jnp.ndarray]
|
| 497 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 498 |
+
|
| 499 |
+
def setup(self):
|
| 500 |
+
self.layers = [
|
| 501 |
+
FlaxBeitLayer(
|
| 502 |
+
self.config,
|
| 503 |
+
window_size=self.window_size if self.config.use_relative_position_bias else None,
|
| 504 |
+
drop_path_rate=self.drop_path_rates[i],
|
| 505 |
+
name=str(i),
|
| 506 |
+
dtype=self.dtype,
|
| 507 |
+
)
|
| 508 |
+
for i in range(self.config.num_hidden_layers)
|
| 509 |
+
]
|
| 510 |
+
|
| 511 |
+
def __call__(
|
| 512 |
+
self,
|
| 513 |
+
hidden_states,
|
| 514 |
+
deterministic: bool = True,
|
| 515 |
+
output_attentions: bool = False,
|
| 516 |
+
output_hidden_states: bool = False,
|
| 517 |
+
return_dict: bool = True,
|
| 518 |
+
):
|
| 519 |
+
all_attentions = () if output_attentions else None
|
| 520 |
+
all_hidden_states = () if output_hidden_states else None
|
| 521 |
+
|
| 522 |
+
for i, layer in enumerate(self.layers):
|
| 523 |
+
if output_hidden_states:
|
| 524 |
+
all_hidden_states += (hidden_states,)
|
| 525 |
+
relative_position_bias = self.relative_position_bias() if self.relative_position_bias is not None else None
|
| 526 |
+
layer_outputs = layer(
|
| 527 |
+
hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
hidden_states = layer_outputs[0]
|
| 531 |
+
|
| 532 |
+
if output_attentions:
|
| 533 |
+
all_attentions += (layer_outputs[1],)
|
| 534 |
+
|
| 535 |
+
if output_hidden_states:
|
| 536 |
+
all_hidden_states += (hidden_states,)
|
| 537 |
+
|
| 538 |
+
outputs = (hidden_states,)
|
| 539 |
+
if not return_dict:
|
| 540 |
+
return tuple(v for v in outputs if v is not None)
|
| 541 |
+
|
| 542 |
+
return FlaxBaseModelOutput(
|
| 543 |
+
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
|
| 544 |
+
)
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
class FlaxBeitEncoder(nn.Module):
|
| 548 |
+
config: BeitConfig
|
| 549 |
+
window_size: Tuple[int, int]
|
| 550 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 551 |
+
|
| 552 |
+
def setup(self):
|
| 553 |
+
if self.config.use_shared_relative_position_bias:
|
| 554 |
+
self.relative_position_bias = FlaxBeitRelativePositionBias(
|
| 555 |
+
config=self.config, window_size=self.window_size, dtype=self.dtype
|
| 556 |
+
)
|
| 557 |
+
|
| 558 |
+
# stochastic depth decay rule
|
| 559 |
+
drop_path_rates = list(np.linspace(0, self.config.drop_path_rate, self.config.num_hidden_layers))
|
| 560 |
+
self.layer = FlaxBeitLayerCollection(
|
| 561 |
+
self.config,
|
| 562 |
+
window_size=self.window_size,
|
| 563 |
+
drop_path_rates=drop_path_rates,
|
| 564 |
+
relative_position_bias=self.relative_position_bias
|
| 565 |
+
if self.config.use_shared_relative_position_bias
|
| 566 |
+
else None,
|
| 567 |
+
dtype=self.dtype,
|
| 568 |
+
)
|
| 569 |
+
|
| 570 |
+
def __call__(
|
| 571 |
+
self,
|
| 572 |
+
hidden_states,
|
| 573 |
+
deterministic: bool = True,
|
| 574 |
+
output_attentions: bool = False,
|
| 575 |
+
output_hidden_states: bool = False,
|
| 576 |
+
return_dict: bool = True,
|
| 577 |
+
):
|
| 578 |
+
return self.layer(
|
| 579 |
+
hidden_states,
|
| 580 |
+
deterministic=deterministic,
|
| 581 |
+
output_attentions=output_attentions,
|
| 582 |
+
output_hidden_states=output_hidden_states,
|
| 583 |
+
return_dict=return_dict,
|
| 584 |
+
)
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
class FlaxBeitPreTrainedModel(FlaxPreTrainedModel):
|
| 588 |
+
"""
|
| 589 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 590 |
+
models.
|
| 591 |
+
"""
|
| 592 |
+
|
| 593 |
+
config_class = BeitConfig
|
| 594 |
+
base_model_prefix = "beit"
|
| 595 |
+
main_input_name = "pixel_values"
|
| 596 |
+
module_class: nn.Module = None
|
| 597 |
+
|
| 598 |
+
def __init__(
|
| 599 |
+
self,
|
| 600 |
+
config: BeitConfig,
|
| 601 |
+
input_shape=None,
|
| 602 |
+
seed: int = 0,
|
| 603 |
+
dtype: jnp.dtype = jnp.float32,
|
| 604 |
+
_do_init: bool = True,
|
| 605 |
+
**kwargs,
|
| 606 |
+
):
|
| 607 |
+
module = self.module_class(config=config, dtype=dtype, **kwargs)
|
| 608 |
+
if input_shape is None:
|
| 609 |
+
input_shape = (1, config.image_size, config.image_size, config.num_channels)
|
| 610 |
+
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
|
| 611 |
+
|
| 612 |
+
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
|
| 613 |
+
# init input tensors
|
| 614 |
+
pixel_values = jnp.zeros(input_shape, dtype=self.dtype)
|
| 615 |
+
|
| 616 |
+
params_rng, dropout_rng = jax.random.split(rng)
|
| 617 |
+
dropout_rng, droppath_rng = jax.random.split(dropout_rng)
|
| 618 |
+
rngs = {"params": params_rng, "dropout": dropout_rng, "droppath": droppath_rng}
|
| 619 |
+
|
| 620 |
+
random_params = self.module.init(rngs, pixel_values, return_dict=False)["params"]
|
| 621 |
+
|
| 622 |
+
if params is not None:
|
| 623 |
+
random_params = flatten_dict(unfreeze(random_params))
|
| 624 |
+
params = flatten_dict(unfreeze(params))
|
| 625 |
+
for missing_key in self._missing_keys:
|
| 626 |
+
params[missing_key] = random_params[missing_key]
|
| 627 |
+
self._missing_keys = set()
|
| 628 |
+
return freeze(unflatten_dict(params))
|
| 629 |
+
else:
|
| 630 |
+
return random_params
|
| 631 |
+
|
| 632 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 633 |
+
def __call__(
|
| 634 |
+
self,
|
| 635 |
+
pixel_values,
|
| 636 |
+
bool_masked_pos=None,
|
| 637 |
+
params: dict = None,
|
| 638 |
+
dropout_rng: jax.random.PRNGKey = None,
|
| 639 |
+
train: bool = False,
|
| 640 |
+
output_attentions: Optional[bool] = None,
|
| 641 |
+
output_hidden_states: Optional[bool] = None,
|
| 642 |
+
return_dict: Optional[bool] = None,
|
| 643 |
+
):
|
| 644 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 645 |
+
output_hidden_states = (
|
| 646 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 647 |
+
)
|
| 648 |
+
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
| 649 |
+
|
| 650 |
+
pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
|
| 651 |
+
# Handle any PRNG if needed
|
| 652 |
+
rngs = {}
|
| 653 |
+
if dropout_rng is not None:
|
| 654 |
+
dropout_rng, droppath_rng = jax.random.split(dropout_rng)
|
| 655 |
+
rngs["dropout"] = dropout_rng
|
| 656 |
+
rngs["droppath"] = droppath_rng
|
| 657 |
+
|
| 658 |
+
return self.module.apply(
|
| 659 |
+
{"params": params or self.params},
|
| 660 |
+
jnp.array(pixel_values, dtype=jnp.float32),
|
| 661 |
+
bool_masked_pos,
|
| 662 |
+
not train,
|
| 663 |
+
output_attentions,
|
| 664 |
+
output_hidden_states,
|
| 665 |
+
return_dict,
|
| 666 |
+
rngs=rngs,
|
| 667 |
+
)
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
class FlaxBeitPooler(nn.Module):
|
| 671 |
+
config: BeitConfig
|
| 672 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 673 |
+
|
| 674 |
+
def setup(self):
|
| 675 |
+
if self.config.use_mean_pooling:
|
| 676 |
+
self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
|
| 677 |
+
|
| 678 |
+
def __call__(self, hidden_states):
|
| 679 |
+
if self.config.use_mean_pooling:
|
| 680 |
+
# Mean pool the final hidden states of the patch tokens
|
| 681 |
+
patch_tokens = hidden_states[:, 1:, :]
|
| 682 |
+
pooled_output = self.layernorm(jnp.mean(patch_tokens, axis=1))
|
| 683 |
+
else:
|
| 684 |
+
# Pool by simply taking the final hidden state of the [CLS] token
|
| 685 |
+
pooled_output = hidden_states[:, 0]
|
| 686 |
+
|
| 687 |
+
return pooled_output
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
class FlaxBeitModule(nn.Module):
|
| 691 |
+
config: BeitConfig
|
| 692 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 693 |
+
add_pooling_layer: bool = True
|
| 694 |
+
|
| 695 |
+
def setup(self):
|
| 696 |
+
self.embeddings = FlaxBeitEmbeddings(self.config, dtype=self.dtype)
|
| 697 |
+
self.encoder = FlaxBeitEncoder(
|
| 698 |
+
self.config, window_size=self.embeddings.patch_embeddings.patch_shape, dtype=self.dtype
|
| 699 |
+
)
|
| 700 |
+
if not self.config.use_mean_pooling:
|
| 701 |
+
self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
|
| 702 |
+
self.pooler = FlaxBeitPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None
|
| 703 |
+
|
| 704 |
+
def __call__(
|
| 705 |
+
self,
|
| 706 |
+
pixel_values,
|
| 707 |
+
bool_masked_pos=None,
|
| 708 |
+
deterministic: bool = True,
|
| 709 |
+
output_attentions: bool = False,
|
| 710 |
+
output_hidden_states: bool = False,
|
| 711 |
+
return_dict: bool = True,
|
| 712 |
+
):
|
| 713 |
+
hidden_states = self.embeddings(pixel_values, bool_masked_pos, deterministic=deterministic)
|
| 714 |
+
|
| 715 |
+
outputs = self.encoder(
|
| 716 |
+
hidden_states,
|
| 717 |
+
deterministic=deterministic,
|
| 718 |
+
output_attentions=output_attentions,
|
| 719 |
+
output_hidden_states=output_hidden_states,
|
| 720 |
+
return_dict=return_dict,
|
| 721 |
+
)
|
| 722 |
+
hidden_states = outputs[0]
|
| 723 |
+
if not self.config.use_mean_pooling:
|
| 724 |
+
hidden_states = self.layernorm(hidden_states)
|
| 725 |
+
pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
|
| 726 |
+
|
| 727 |
+
if not return_dict:
|
| 728 |
+
# if pooled is None, don't return it
|
| 729 |
+
if pooled is None:
|
| 730 |
+
return (hidden_states,) + outputs[1:]
|
| 731 |
+
return (hidden_states, pooled) + outputs[1:]
|
| 732 |
+
|
| 733 |
+
return FlaxBeitModelOutputWithPooling(
|
| 734 |
+
last_hidden_state=hidden_states,
|
| 735 |
+
pooler_output=pooled,
|
| 736 |
+
hidden_states=outputs.hidden_states,
|
| 737 |
+
attentions=outputs.attentions,
|
| 738 |
+
)
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
@add_start_docstrings(
|
| 742 |
+
"The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
|
| 743 |
+
BEIT_START_DOCSTRING,
|
| 744 |
+
)
|
| 745 |
+
class FlaxBeitModel(FlaxBeitPreTrainedModel):
|
| 746 |
+
module_class = FlaxBeitModule
|
| 747 |
+
|
| 748 |
+
|
| 749 |
+
FLAX_BEIT_MODEL_DOCSTRING = """
|
| 750 |
+
Returns:
|
| 751 |
+
|
| 752 |
+
Examples:
|
| 753 |
+
|
| 754 |
+
```python
|
| 755 |
+
>>> from transformers import AutoImageProcessor, FlaxBeitModel
|
| 756 |
+
>>> from PIL import Image
|
| 757 |
+
>>> import requests
|
| 758 |
+
|
| 759 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 760 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 761 |
+
|
| 762 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")
|
| 763 |
+
>>> model = FlaxBeitModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")
|
| 764 |
+
|
| 765 |
+
>>> inputs = image_processor(images=image, return_tensors="np")
|
| 766 |
+
>>> outputs = model(**inputs)
|
| 767 |
+
>>> last_hidden_states = outputs.last_hidden_state
|
| 768 |
+
```
|
| 769 |
+
"""
|
| 770 |
+
|
| 771 |
+
overwrite_call_docstring(FlaxBeitModel, FLAX_BEIT_MODEL_DOCSTRING)
|
| 772 |
+
append_replace_return_docstrings(FlaxBeitModel, output_type=FlaxBeitModelOutputWithPooling, config_class=BeitConfig)
|
| 773 |
+
|
| 774 |
+
|
| 775 |
+
class FlaxBeitForMaskedImageModelingModule(nn.Module):
|
| 776 |
+
config: BeitConfig
|
| 777 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
| 778 |
+
|
| 779 |
+
def setup(self):
|
| 780 |
+
self.beit = FlaxBeitModule(self.config, add_pooling_layer=False, dtype=self.dtype)
|
| 781 |
+
|
| 782 |
+
# Classifier head
|
| 783 |
+
self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
|
| 784 |
+
self.lm_head = nn.Dense(
|
| 785 |
+
self.config.vocab_size,
|
| 786 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 787 |
+
dtype=self.dtype,
|
| 788 |
+
)
|
| 789 |
+
|
| 790 |
+
def __call__(
|
| 791 |
+
self,
|
| 792 |
+
pixel_values=None,
|
| 793 |
+
bool_masked_pos=None,
|
| 794 |
+
deterministic: bool = True,
|
| 795 |
+
output_attentions=None,
|
| 796 |
+
output_hidden_states=None,
|
| 797 |
+
return_dict=None,
|
| 798 |
+
):
|
| 799 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 800 |
+
|
| 801 |
+
outputs = self.beit(
|
| 802 |
+
pixel_values,
|
| 803 |
+
bool_masked_pos,
|
| 804 |
+
deterministic=deterministic,
|
| 805 |
+
output_attentions=output_attentions,
|
| 806 |
+
output_hidden_states=output_hidden_states,
|
| 807 |
+
return_dict=return_dict,
|
| 808 |
+
)
|
| 809 |
+
|
| 810 |
+
sequence_output = outputs[0]
|
| 811 |
+
sequence_output = self.layernorm(sequence_output)
|
| 812 |
+
prediction_scores = self.lm_head(sequence_output[:, 1:])
|
| 813 |
+
|
| 814 |
+
if not return_dict:
|
| 815 |
+
output = (prediction_scores,) + outputs[2:]
|
| 816 |
+
return output
|
| 817 |
+
|
| 818 |
+
return FlaxMaskedLMOutput(
|
| 819 |
+
logits=prediction_scores,
|
| 820 |
+
hidden_states=outputs.hidden_states,
|
| 821 |
+
attentions=outputs.attentions,
|
| 822 |
+
)
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
@add_start_docstrings(
|
| 826 |
+
"Beit Model transformer with a 'language' modeling head on top (to predict visual tokens).",
|
| 827 |
+
BEIT_START_DOCSTRING,
|
| 828 |
+
)
|
| 829 |
+
class FlaxBeitForMaskedImageModeling(FlaxBeitPreTrainedModel):
|
| 830 |
+
module_class = FlaxBeitForMaskedImageModelingModule
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
FLAX_BEIT_MLM_DOCSTRING = """
|
| 834 |
+
bool_masked_pos (`numpy.ndarray` of shape `(batch_size, num_patches)`):
|
| 835 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
|
| 836 |
+
|
| 837 |
+
Returns:
|
| 838 |
+
|
| 839 |
+
Examples:
|
| 840 |
+
|
| 841 |
+
```python
|
| 842 |
+
>>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling
|
| 843 |
+
>>> from PIL import Image
|
| 844 |
+
>>> import requests
|
| 845 |
+
|
| 846 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 847 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 848 |
+
|
| 849 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
|
| 850 |
+
>>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
|
| 851 |
+
|
| 852 |
+
>>> inputs = image_processor(images=image, return_tensors="np")
|
| 853 |
+
>>> outputs = model(**inputs)
|
| 854 |
+
>>> logits = outputs.logits
|
| 855 |
+
```
|
| 856 |
+
"""
|
| 857 |
+
|
| 858 |
+
overwrite_call_docstring(FlaxBeitForMaskedImageModeling, FLAX_BEIT_MLM_DOCSTRING)
|
| 859 |
+
append_replace_return_docstrings(
|
| 860 |
+
FlaxBeitForMaskedImageModeling, output_type=FlaxMaskedLMOutput, config_class=BeitConfig
|
| 861 |
+
)
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
class FlaxBeitForImageClassificationModule(nn.Module):
|
| 865 |
+
config: BeitConfig
|
| 866 |
+
dtype: jnp.dtype = jnp.float32
|
| 867 |
+
|
| 868 |
+
def setup(self):
|
| 869 |
+
self.beit = FlaxBeitModule(config=self.config, dtype=self.dtype, add_pooling_layer=True)
|
| 870 |
+
self.classifier = nn.Dense(
|
| 871 |
+
self.config.num_labels,
|
| 872 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
| 873 |
+
dtype=self.dtype,
|
| 874 |
+
)
|
| 875 |
+
|
| 876 |
+
def __call__(
|
| 877 |
+
self,
|
| 878 |
+
pixel_values=None,
|
| 879 |
+
bool_masked_pos=None,
|
| 880 |
+
deterministic: bool = True,
|
| 881 |
+
output_attentions=None,
|
| 882 |
+
output_hidden_states=None,
|
| 883 |
+
return_dict=None,
|
| 884 |
+
):
|
| 885 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 886 |
+
|
| 887 |
+
outputs = self.beit(
|
| 888 |
+
pixel_values,
|
| 889 |
+
deterministic=deterministic,
|
| 890 |
+
output_attentions=output_attentions,
|
| 891 |
+
output_hidden_states=output_hidden_states,
|
| 892 |
+
return_dict=return_dict,
|
| 893 |
+
)
|
| 894 |
+
|
| 895 |
+
pooled_output = outputs[1]
|
| 896 |
+
logits = self.classifier(pooled_output)
|
| 897 |
+
|
| 898 |
+
if not return_dict:
|
| 899 |
+
output = (logits,) + outputs[2:]
|
| 900 |
+
return output
|
| 901 |
+
|
| 902 |
+
return FlaxSequenceClassifierOutput(
|
| 903 |
+
logits=logits,
|
| 904 |
+
hidden_states=outputs.hidden_states,
|
| 905 |
+
attentions=outputs.attentions,
|
| 906 |
+
)
|
| 907 |
+
|
| 908 |
+
|
| 909 |
+
@add_start_docstrings(
|
| 910 |
+
"""
|
| 911 |
+
Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
|
| 912 |
+
hidden states of the patch tokens) e.g. for ImageNet.
|
| 913 |
+
""",
|
| 914 |
+
BEIT_START_DOCSTRING,
|
| 915 |
+
)
|
| 916 |
+
class FlaxBeitForImageClassification(FlaxBeitPreTrainedModel):
|
| 917 |
+
module_class = FlaxBeitForImageClassificationModule
|
| 918 |
+
|
| 919 |
+
|
| 920 |
+
FLAX_BEIT_CLASSIF_DOCSTRING = """
|
| 921 |
+
Returns:
|
| 922 |
+
|
| 923 |
+
Example:
|
| 924 |
+
|
| 925 |
+
```python
|
| 926 |
+
>>> from transformers import AutoImageProcessor, FlaxBeitForImageClassification
|
| 927 |
+
>>> from PIL import Image
|
| 928 |
+
>>> import requests
|
| 929 |
+
|
| 930 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 931 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 932 |
+
|
| 933 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
|
| 934 |
+
>>> model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
|
| 935 |
+
|
| 936 |
+
>>> inputs = image_processor(images=image, return_tensors="np")
|
| 937 |
+
>>> outputs = model(**inputs)
|
| 938 |
+
>>> logits = outputs.logits
|
| 939 |
+
>>> # model predicts one of the 1000 ImageNet classes
|
| 940 |
+
>>> predicted_class_idx = logits.argmax(-1).item()
|
| 941 |
+
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
|
| 942 |
+
```
|
| 943 |
+
"""
|
| 944 |
+
|
| 945 |
+
overwrite_call_docstring(FlaxBeitForImageClassification, FLAX_BEIT_CLASSIF_DOCSTRING)
|
| 946 |
+
append_replace_return_docstrings(
|
| 947 |
+
FlaxBeitForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=BeitConfig
|
| 948 |
+
)
|
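The Flax BEiT file above exposes `relative_position_index_init`, the helper that `FlaxBeitRelativePositionBias` uses to build its lookup table. As a quick editor-added sanity check (a sketch, not part of the uploaded file; it assumes `transformers`, `jax` and `flax` are installed), the table for a 14x14 patch grid covers every patch token plus the [CLS] token:

```python
from transformers.models.beit.modeling_flax_beit import relative_position_index_init

# 14 * 14 patches + 1 [CLS] token = 197 rows/columns in the pairwise index table.
index = relative_position_index_init((14, 14))
print(index.shape)  # (197, 197)
```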
deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__init__.py
ADDED
|
@@ -0,0 +1,130 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]


try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
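The `_LazyModule` registration at the end of this `__init__.py` means none of the torch or TensorFlow modules listed in `_import_structure` are imported until one of their names is actually used. A minimal editor-added sketch of what that looks like from user code (assuming `transformers` and PyTorch are installed; the tiny config is only for illustration):

```python
from transformers import ConvBertConfig, ConvBertModel  # names resolved lazily via _LazyModule

config = ConvBertConfig(num_hidden_layers=2)  # small, randomly initialized model for illustration
model = ConvBertModel(config)                 # touching the class is what triggers the real import
print(type(model).__name__)                   # ConvBertModel
```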
deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.99 kB)

deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc
ADDED
Binary file (6.36 kB)

deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc
ADDED
Binary file (1.41 kB)

deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc
ADDED
Binary file (38.6 kB)

deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc
ADDED
Binary file (43.2 kB)

deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc
ADDED
Binary file (17.7 kB)

deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc
ADDED
Binary file (7.39 kB)
deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py
ADDED
|
@@ -0,0 +1,166 @@
# coding=utf-8
# Copyright The HuggingFace team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ConvBERT model configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConvBertModel`]. It is used to instantiate a
    ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the ConvBERT
    [YituTech/conv-bert-base](https://huggingface.co/YituTech/conv-bert-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        head_ratio (`int`, *optional*, defaults to 2):
            Ratio gamma to reduce the number of attention heads.
        num_groups (`int`, *optional*, defaults to 1):
            The number of groups for grouped linear layers in the ConvBERT model.
        conv_kernel_size (`int`, *optional*, defaults to 9):
            The size of the convolutional kernel.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Example:

    ```python
    >>> from transformers import ConvBertConfig, ConvBertModel

    >>> # Initializing a ConvBERT convbert-base-uncased style configuration
    >>> configuration = ConvBertConfig()

    >>> # Initializing a model (with random weights) from the convbert-base-uncased style configuration
    >>> model = ConvBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


# Copied from transformers.models.bert.configuration_bert.BertOnnxConfig
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
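In the configuration above, `head_ratio` is the gamma factor that shrinks the number of self-attention heads so the saved capacity can go to ConvBERT's span-based convolution branch. A rough editor-added sketch of the resulting head count under the defaults; the `//` reduction mirrors how the modeling code divides heads by `head_ratio`, but treat the snippet as an illustration rather than the library's API:

```python
from transformers import ConvBertConfig

config = ConvBertConfig()  # defaults: num_attention_heads=12, head_ratio=2
# Assumed behaviour: the attention module keeps num_attention_heads // head_ratio ordinary heads.
reduced_heads = config.num_attention_heads // config.head_ratio
print(reduced_heads)  # 6 with the default values
```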
deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py
ADDED
|
@@ -0,0 +1,57 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvBERT checkpoint."""

import argparse

from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
    conf = ConvBertConfig.from_json_file(convbert_config_file)
    model = ConvBertModel(conf)

    model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
    model.save_pretrained(pytorch_dump_path)

    tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
    tf_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
    )
    parser.add_argument(
        "--convbert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ConvBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
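A hedged usage sketch of the conversion entry point above; the three paths are placeholders, and the call assumes it runs in a context where the function defined in this script is in scope (for example, inside the script itself or after copying it into a local module).

# Illustrative only: placeholder paths, not a real checkpoint.
tf_checkpoint = "/path/to/tf1_checkpoint/model.ckpt"   # placeholder
config_json = "/path/to/convbert_config.json"          # placeholder
dump_dir = "/path/to/pytorch_dump"                     # placeholder

convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint, config_json, dump_dir)
# Afterwards dump_dir holds both the PyTorch weights and the TF2 (from_pt) re-export.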
deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/modeling_convbert.py
ADDED
|
@@ -0,0 +1,1341 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ConvBERT model."""


import math
import os
from operator import attrgetter
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN, get_activation
from ...modeling_outputs import (
    BaseModelOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_convbert import ConvBertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"

CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "YituTech/conv-bert-base",
    "YituTech/conv-bert-medium-small",
    "YituTech/conv-bert-small",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
]

def load_tf_weights_in_convbert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_data = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        tf_data[name] = array

    param_mapping = {
        "embeddings.word_embeddings.weight": "electra/embeddings/word_embeddings",
        "embeddings.position_embeddings.weight": "electra/embeddings/position_embeddings",
        "embeddings.token_type_embeddings.weight": "electra/embeddings/token_type_embeddings",
        "embeddings.LayerNorm.weight": "electra/embeddings/LayerNorm/gamma",
        "embeddings.LayerNorm.bias": "electra/embeddings/LayerNorm/beta",
        "embeddings_project.weight": "electra/embeddings_project/kernel",
        "embeddings_project.bias": "electra/embeddings_project/bias",
    }
    if config.num_groups > 1:
        group_dense_name = "g_dense"
    else:
        group_dense_name = "dense"

    for j in range(config.num_hidden_layers):
        param_mapping[
            f"encoder.layer.{j}.attention.self.query.weight"
        ] = f"electra/encoder/layer_{j}/attention/self/query/kernel"
        param_mapping[
            f"encoder.layer.{j}.attention.self.query.bias"
        ] = f"electra/encoder/layer_{j}/attention/self/query/bias"
        param_mapping[
            f"encoder.layer.{j}.attention.self.key.weight"
        ] = f"electra/encoder/layer_{j}/attention/self/key/kernel"
        param_mapping[
            f"encoder.layer.{j}.attention.self.key.bias"
        ] = f"electra/encoder/layer_{j}/attention/self/key/bias"
        param_mapping[
            f"encoder.layer.{j}.attention.self.value.weight"
        ] = f"electra/encoder/layer_{j}/attention/self/value/kernel"
        param_mapping[
            f"encoder.layer.{j}.attention.self.value.bias"
        ] = f"electra/encoder/layer_{j}/attention/self/value/bias"
        param_mapping[
            f"encoder.layer.{j}.attention.self.key_conv_attn_layer.depthwise.weight"
        ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/depthwise_kernel"
        param_mapping[
            f"encoder.layer.{j}.attention.self.key_conv_attn_layer.pointwise.weight"
        ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/pointwise_kernel"
        param_mapping[
            f"encoder.layer.{j}.attention.self.key_conv_attn_layer.bias"
        ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/bias"
        param_mapping[
            f"encoder.layer.{j}.attention.self.conv_kernel_layer.weight"
        ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_kernel/kernel"
        param_mapping[
            f"encoder.layer.{j}.attention.self.conv_kernel_layer.bias"
        ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_kernel/bias"
        param_mapping[
            f"encoder.layer.{j}.attention.self.conv_out_layer.weight"
        ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_point/kernel"
        param_mapping[
            f"encoder.layer.{j}.attention.self.conv_out_layer.bias"
        ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_point/bias"
        param_mapping[
            f"encoder.layer.{j}.attention.output.dense.weight"
        ] = f"electra/encoder/layer_{j}/attention/output/dense/kernel"
        param_mapping[
            f"encoder.layer.{j}.attention.output.LayerNorm.weight"
        ] = f"electra/encoder/layer_{j}/attention/output/LayerNorm/gamma"
        param_mapping[
            f"encoder.layer.{j}.attention.output.dense.bias"
        ] = f"electra/encoder/layer_{j}/attention/output/dense/bias"
        param_mapping[
            f"encoder.layer.{j}.attention.output.LayerNorm.bias"
        ] = f"electra/encoder/layer_{j}/attention/output/LayerNorm/beta"
        param_mapping[
            f"encoder.layer.{j}.intermediate.dense.weight"
        ] = f"electra/encoder/layer_{j}/intermediate/{group_dense_name}/kernel"
        param_mapping[
            f"encoder.layer.{j}.intermediate.dense.bias"
        ] = f"electra/encoder/layer_{j}/intermediate/{group_dense_name}/bias"
        param_mapping[
            f"encoder.layer.{j}.output.dense.weight"
        ] = f"electra/encoder/layer_{j}/output/{group_dense_name}/kernel"
        param_mapping[
            f"encoder.layer.{j}.output.dense.bias"
        ] = f"electra/encoder/layer_{j}/output/{group_dense_name}/bias"
        param_mapping[
            f"encoder.layer.{j}.output.LayerNorm.weight"
        ] = f"electra/encoder/layer_{j}/output/LayerNorm/gamma"
        param_mapping[f"encoder.layer.{j}.output.LayerNorm.bias"] = f"electra/encoder/layer_{j}/output/LayerNorm/beta"

    for param in model.named_parameters():
        param_name = param[0]
        retriever = attrgetter(param_name)
        result = retriever(model)
        tf_name = param_mapping[param_name]
        value = torch.from_numpy(tf_data[tf_name])
        logger.info(f"TF: {tf_name}, PT: {param_name} ")
        if tf_name.endswith("/kernel"):
            if not tf_name.endswith("/intermediate/g_dense/kernel"):
                if not tf_name.endswith("/output/g_dense/kernel"):
                    value = value.T
        if tf_name.endswith("/depthwise_kernel"):
            value = value.permute(1, 2, 0)  # 2, 0, 1
        if tf_name.endswith("/pointwise_kernel"):
            value = value.permute(2, 1, 0)  # 2, 1, 0
        if tf_name.endswith("/conv_attn_key/bias"):
            value = value.unsqueeze(-1)
        result.data = value
    return model

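The loading loop above transposes TF "kernel" tensors because TensorFlow stores dense kernels as (in_features, out_features) while PyTorch nn.Linear weights are (out_features, in_features). A small sketch of that convention, using a synthetic array rather than a real checkpoint:

# Synthetic illustration of the kernel transposition convention.
import numpy as np
import torch
from torch import nn

linear = nn.Linear(4, 3)                        # PyTorch weight shape: (3, 4)
tf_kernel = np.zeros((4, 3), dtype=np.float32)  # TF kernel shape: (in, out) = (4, 3)

value = torch.from_numpy(tf_kernel).T           # transpose to (out, in), as in the loader
assert value.shape == linear.weight.shape
linear.weight.data = value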
class ConvBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.LongTensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        # Set token_type_ids to the all-zeros buffer registered in the constructor, which usually happens when it is
        # auto-generated; the registered buffer helps users trace the model without passing token_type_ids (issue #5664)
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class ConvBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvBertConfig
    load_tf_weights = load_tf_weights_in_convbert
    base_model_prefix = "convbert"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

class SeparableConv1D(nn.Module):
    """This class implements separable convolution, i.e. a depthwise and a pointwise layer"""

    def __init__(self, config, input_filters, output_filters, kernel_size, **kwargs):
        super().__init__()
        self.depthwise = nn.Conv1d(
            input_filters,
            input_filters,
            kernel_size=kernel_size,
            groups=input_filters,
            padding=kernel_size // 2,
            bias=False,
        )
        self.pointwise = nn.Conv1d(input_filters, output_filters, kernel_size=1, bias=False)
        self.bias = nn.Parameter(torch.zeros(output_filters, 1))

        self.depthwise.weight.data.normal_(mean=0.0, std=config.initializer_range)
        self.pointwise.weight.data.normal_(mean=0.0, std=config.initializer_range)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self.depthwise(hidden_states)
        x = self.pointwise(x)
        x += self.bias
        return x

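A short shape check for the separable convolution above; the filter sizes are illustrative, and only initializer_range is actually read from the config here.

# Illustrative shape check for SeparableConv1D.
import torch
from transformers import ConvBertConfig

config = ConvBertConfig()  # supplies initializer_range used by SeparableConv1D
conv = SeparableConv1D(config, input_filters=768, output_filters=384, kernel_size=9)

hidden = torch.randn(2, 768, 128)  # (batch, channels, sequence), as nn.Conv1d expects
out = conv(hidden)
print(out.shape)                   # torch.Size([2, 384, 128]); odd kernel keeps the length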
class ConvBertSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        new_num_attention_heads = config.num_attention_heads // config.head_ratio
        if new_num_attention_heads < 1:
            self.head_ratio = config.num_attention_heads
            self.num_attention_heads = 1
        else:
            self.num_attention_heads = new_num_attention_heads
            self.head_ratio = config.head_ratio

        self.conv_kernel_size = config.conv_kernel_size
        if config.hidden_size % self.num_attention_heads != 0:
            raise ValueError("hidden_size should be divisible by num_attention_heads")

        self.attention_head_size = (config.hidden_size // self.num_attention_heads) // 2
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.key_conv_attn_layer = SeparableConv1D(
            config, config.hidden_size, self.all_head_size, self.conv_kernel_size
        )
        self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size)
        self.conv_out_layer = nn.Linear(config.hidden_size, self.all_head_size)

        self.unfold = nn.Unfold(
            kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0]
        )

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)
        batch_size = hidden_states.size(0)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)

        mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states.transpose(1, 2))
        mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer)

        conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
        conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
        conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1)

        conv_out_layer = self.conv_out_layer(hidden_states)
        conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
        conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1)
        conv_out_layer = nn.functional.unfold(
            conv_out_layer,
            kernel_size=[self.conv_kernel_size, 1],
            dilation=1,
            padding=[(self.conv_kernel_size - 1) // 2, 0],
            stride=1,
        )
        conv_out_layer = conv_out_layer.transpose(1, 2).reshape(
            batch_size, -1, self.all_head_size, self.conv_kernel_size
        )
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
        conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer)
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size])

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the ConvBertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()

        conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
        context_layer = torch.cat([context_layer, conv_out], 2)

        # conv and context
        new_context_layer_shape = context_layer.size()[:-2] + (
            self.num_attention_heads * self.attention_head_size * 2,
        )
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs

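To make the mixed attention block above more concrete, here is a toy sketch of the span-based dynamic convolution it performs on the "conv value" path: a per-position softmax kernel is applied to an unfolded window of features. All dimensions are deliberately tiny and illustrative only.

# Toy sketch of span-based dynamic convolution (not the module itself).
import torch
from torch import nn

batch, seq_len, head_size, kernel_size = 1, 6, 4, 3

values = torch.randn(batch, seq_len, head_size)                               # per-token features
kernels = torch.softmax(torch.randn(batch * seq_len, kernel_size, 1), dim=1)  # per-position mixing weights

# Unfold a sliding window of length kernel_size around every position ("same" padding).
windows = nn.functional.unfold(
    values.transpose(1, 2).unsqueeze(-1),        # (batch, head_size, seq_len, 1)
    kernel_size=[kernel_size, 1],
    padding=[(kernel_size - 1) // 2, 0],
)                                                # (batch, head_size * kernel_size, seq_len)
windows = windows.transpose(1, 2).reshape(batch * seq_len, head_size, kernel_size)

mixed = torch.matmul(windows, kernels)           # (batch * seq_len, head_size, 1)
print(mixed.reshape(batch, seq_len, head_size).shape)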
class ConvBertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class ConvBertAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = ConvBertSelfAttention(config)
        self.output = ConvBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class GroupedLinearLayer(nn.Module):
    def __init__(self, input_size, output_size, num_groups):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.num_groups = num_groups
        self.group_in_dim = self.input_size // self.num_groups
        self.group_out_dim = self.output_size // self.num_groups
        self.weight = nn.Parameter(torch.empty(self.num_groups, self.group_in_dim, self.group_out_dim))
        self.bias = nn.Parameter(torch.empty(output_size))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        batch_size = list(hidden_states.size())[0]
        x = torch.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim])
        x = x.permute(1, 0, 2)
        x = torch.matmul(x, self.weight)
        x = x.permute(1, 0, 2)
        x = torch.reshape(x, [batch_size, -1, self.output_size])
        x = x + self.bias
        return x

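A small shape sketch for the grouped linear layer above: each group of input features gets its own weight matrix, so the parameter count drops by a factor of num_groups compared to a dense nn.Linear. The dimensions and the explicit initialization are illustrative (the module itself creates its parameters with torch.empty).

# Illustrative shapes and parameter count for GroupedLinearLayer.
import torch

layer = GroupedLinearLayer(input_size=768, output_size=3072, num_groups=2)
torch.nn.init.normal_(layer.weight, std=0.02)
torch.nn.init.zeros_(layer.bias)

x = torch.randn(2, 128, 768)   # (batch, seq_len, hidden)
y = layer(x)
print(y.shape)                 # torch.Size([2, 128, 3072])

full = 768 * 3072                        # weights in a dense nn.Linear
grouped = 2 * (768 // 2) * (3072 // 2)   # weights here: half as many
print(full, grouped)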
class ConvBertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.num_groups == 1:
            self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        else:
            self.dense = GroupedLinearLayer(
                input_size=config.hidden_size, output_size=config.intermediate_size, num_groups=config.num_groups
            )
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class ConvBertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.num_groups == 1:
            self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        else:
            self.dense = GroupedLinearLayer(
                input_size=config.intermediate_size, output_size=config.hidden_size, num_groups=config.num_groups
            )
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class ConvBertLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ConvBertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = ConvBertAttention(config)
        self.intermediate = ConvBertIntermediate(config)
        self.output = ConvBertOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]:
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise AttributeError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )
            cross_attention_outputs = self.crossattention(
                attention_output,
                encoder_attention_mask,
                head_mask,
                encoder_hidden_states,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:]  # add cross attentions if we output attention weights

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class ConvBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ConvBertLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithCrossAttentions(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class ConvBertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


CONVBERT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CONVBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
    CONVBERT_START_DOCSTRING,
)
class ConvBertModel(ConvBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = ConvBertEmbeddings(config)

        if config.embedding_size != config.hidden_size:
            self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)

        self.encoder = ConvBertEncoder(config)
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        hidden_states = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )

        if hasattr(self, "embeddings_project"):
            hidden_states = self.embeddings_project(hidden_states)

        hidden_states = self.encoder(
            hidden_states,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return hidden_states

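A short end-to-end sketch of running the bare model defined above; the YituTech/conv-bert-base checkpoint is the one already referenced in _CHECKPOINT_FOR_DOC, and the example assumes it can be downloaded.

# Running the bare ConvBertModel on a single sentence.
import torch
from transformers import AutoTokenizer, ConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("ConvBERT mixes self-attention with dynamic convolution.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch, sequence_length, hidden_size)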
class ConvBertGeneratorPredictions(nn.Module):
|
| 854 |
+
"""Prediction module for the generator, made up of two dense layers."""
|
| 855 |
+
|
| 856 |
+
def __init__(self, config):
|
| 857 |
+
super().__init__()
|
| 858 |
+
|
| 859 |
+
self.activation = get_activation("gelu")
|
| 860 |
+
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
|
| 861 |
+
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
|
| 862 |
+
|
| 863 |
+
def forward(self, generator_hidden_states: torch.FloatTensor) -> torch.FloatTensor:
|
| 864 |
+
hidden_states = self.dense(generator_hidden_states)
|
| 865 |
+
hidden_states = self.activation(hidden_states)
|
| 866 |
+
hidden_states = self.LayerNorm(hidden_states)
|
| 867 |
+
|
| 868 |
+
return hidden_states
|
| 869 |
+
|
| 870 |
+
|
| 871 |
+
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
|
| 872 |
+
class ConvBertForMaskedLM(ConvBertPreTrainedModel):
|
| 873 |
+
_tied_weights_keys = ["generator.lm_head.weight"]
|
| 874 |
+
|
| 875 |
+
def __init__(self, config):
|
| 876 |
+
super().__init__(config)
|
| 877 |
+
|
| 878 |
+
self.convbert = ConvBertModel(config)
|
| 879 |
+
self.generator_predictions = ConvBertGeneratorPredictions(config)
|
| 880 |
+
|
| 881 |
+
self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
|
| 882 |
+
# Initialize weights and apply final processing
|
| 883 |
+
self.post_init()
|
| 884 |
+
|
| 885 |
+
def get_output_embeddings(self):
|
| 886 |
+
return self.generator_lm_head
|
| 887 |
+
|
| 888 |
+
def set_output_embeddings(self, word_embeddings):
|
| 889 |
+
self.generator_lm_head = word_embeddings
|
| 890 |
+
|
| 891 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 892 |
+
@add_code_sample_docstrings(
|
| 893 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 894 |
+
output_type=MaskedLMOutput,
|
| 895 |
+
config_class=_CONFIG_FOR_DOC,
|
| 896 |
+
)
|
| 897 |
+
def forward(
|
| 898 |
+
self,
|
| 899 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 900 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 901 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
| 902 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 903 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
| 904 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 905 |
+
labels: Optional[torch.LongTensor] = None,
|
| 906 |
+
output_attentions: Optional[bool] = None,
|
| 907 |
+
output_hidden_states: Optional[bool] = None,
|
| 908 |
+
return_dict: Optional[bool] = None,
|
| 909 |
+
) -> Union[Tuple, MaskedLMOutput]:
|
| 910 |
+
r"""
|
| 911 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 912 |
+
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
|
| 913 |
+
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
|
| 914 |
+
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
|
| 915 |
+
"""
|
| 916 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 917 |
+
|
| 918 |
+
generator_hidden_states = self.convbert(
|
| 919 |
+
input_ids,
|
| 920 |
+
attention_mask,
|
| 921 |
+
token_type_ids,
|
| 922 |
+
position_ids,
|
| 923 |
+
head_mask,
|
| 924 |
+
inputs_embeds,
|
| 925 |
+
output_attentions,
|
| 926 |
+
output_hidden_states,
|
| 927 |
+
return_dict,
|
| 928 |
+
)
|
| 929 |
+
generator_sequence_output = generator_hidden_states[0]
|
| 930 |
+
|
| 931 |
+
prediction_scores = self.generator_predictions(generator_sequence_output)
|
| 932 |
+
prediction_scores = self.generator_lm_head(prediction_scores)
|
| 933 |
+
|
| 934 |
+
loss = None
|
| 935 |
+
# Masked language modeling softmax layer
|
| 936 |
+
if labels is not None:
|
| 937 |
+
loss_fct = nn.CrossEntropyLoss() # -100 index = padding token
|
| 938 |
+
loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
|
| 939 |
+
|
| 940 |
+
if not return_dict:
|
| 941 |
+
output = (prediction_scores,) + generator_hidden_states[1:]
|
| 942 |
+
return ((loss,) + output) if loss is not None else output
|
| 943 |
+
|
| 944 |
+
return MaskedLMOutput(
|
| 945 |
+
loss=loss,
|
| 946 |
+
logits=prediction_scores,
|
| 947 |
+
hidden_states=generator_hidden_states.hidden_states,
|
| 948 |
+
attentions=generator_hidden_states.attentions,
|
| 949 |
+
)
|
| 950 |
+
|
| 951 |
+
|
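The masked-LM head above projects hidden states back to the vocabulary through `generator_predictions` followed by `generator_lm_head`. A minimal usage sketch, assuming the `YituTech/conv-bert-base` checkpoint and the standard `transformers`/`torch` APIs; the example sentence and variable names are illustrative only.

import torch
from transformers import AutoTokenizer, ConvBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the most likely token at the [MASK] position.
mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))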
| 952 |
+
class ConvBertClassificationHead(nn.Module):
|
| 953 |
+
"""Head for sentence-level classification tasks."""
|
| 954 |
+
|
| 955 |
+
def __init__(self, config):
|
| 956 |
+
super().__init__()
|
| 957 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 958 |
+
classifier_dropout = (
|
| 959 |
+
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
|
| 960 |
+
)
|
| 961 |
+
self.dropout = nn.Dropout(classifier_dropout)
|
| 962 |
+
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
|
| 963 |
+
|
| 964 |
+
self.config = config
|
| 965 |
+
|
| 966 |
+
def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
|
| 967 |
+
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
|
| 968 |
+
x = self.dropout(x)
|
| 969 |
+
x = self.dense(x)
|
| 970 |
+
x = ACT2FN[self.config.hidden_act](x)
|
| 971 |
+
x = self.dropout(x)
|
| 972 |
+
x = self.out_proj(x)
|
| 973 |
+
return x
|
| 974 |
+
|
| 975 |
+
|
| 976 |
+
@add_start_docstrings(
|
| 977 |
+
"""
|
| 978 |
+
ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
|
| 979 |
+
pooled output) e.g. for GLUE tasks.
|
| 980 |
+
""",
|
| 981 |
+
CONVBERT_START_DOCSTRING,
|
| 982 |
+
)
|
| 983 |
+
class ConvBertForSequenceClassification(ConvBertPreTrainedModel):
|
| 984 |
+
def __init__(self, config):
|
| 985 |
+
super().__init__(config)
|
| 986 |
+
self.num_labels = config.num_labels
|
| 987 |
+
self.config = config
|
| 988 |
+
self.convbert = ConvBertModel(config)
|
| 989 |
+
self.classifier = ConvBertClassificationHead(config)
|
| 990 |
+
|
| 991 |
+
# Initialize weights and apply final processing
|
| 992 |
+
self.post_init()
|
| 993 |
+
|
| 994 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 995 |
+
@add_code_sample_docstrings(
|
| 996 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 997 |
+
output_type=SequenceClassifierOutput,
|
| 998 |
+
config_class=_CONFIG_FOR_DOC,
|
| 999 |
+
)
|
| 1000 |
+
def forward(
|
| 1001 |
+
self,
|
| 1002 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1003 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 1004 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
| 1005 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1006 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
| 1007 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1008 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1009 |
+
output_attentions: Optional[bool] = None,
|
| 1010 |
+
output_hidden_states: Optional[bool] = None,
|
| 1011 |
+
return_dict: Optional[bool] = None,
|
| 1012 |
+
) -> Union[Tuple, SequenceClassifierOutput]:
|
| 1013 |
+
r"""
|
| 1014 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1015 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 1016 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1017 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1018 |
+
"""
|
| 1019 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1020 |
+
|
| 1021 |
+
outputs = self.convbert(
|
| 1022 |
+
input_ids,
|
| 1023 |
+
attention_mask=attention_mask,
|
| 1024 |
+
token_type_ids=token_type_ids,
|
| 1025 |
+
position_ids=position_ids,
|
| 1026 |
+
head_mask=head_mask,
|
| 1027 |
+
inputs_embeds=inputs_embeds,
|
| 1028 |
+
output_attentions=output_attentions,
|
| 1029 |
+
output_hidden_states=output_hidden_states,
|
| 1030 |
+
return_dict=return_dict,
|
| 1031 |
+
)
|
| 1032 |
+
|
| 1033 |
+
sequence_output = outputs[0]
|
| 1034 |
+
logits = self.classifier(sequence_output)
|
| 1035 |
+
|
| 1036 |
+
loss = None
|
| 1037 |
+
if labels is not None:
|
| 1038 |
+
if self.config.problem_type is None:
|
| 1039 |
+
if self.num_labels == 1:
|
| 1040 |
+
self.config.problem_type = "regression"
|
| 1041 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
| 1042 |
+
self.config.problem_type = "single_label_classification"
|
| 1043 |
+
else:
|
| 1044 |
+
self.config.problem_type = "multi_label_classification"
|
| 1045 |
+
|
| 1046 |
+
if self.config.problem_type == "regression":
|
| 1047 |
+
loss_fct = MSELoss()
|
| 1048 |
+
if self.num_labels == 1:
|
| 1049 |
+
loss = loss_fct(logits.squeeze(), labels.squeeze())
|
| 1050 |
+
else:
|
| 1051 |
+
loss = loss_fct(logits, labels)
|
| 1052 |
+
elif self.config.problem_type == "single_label_classification":
|
| 1053 |
+
loss_fct = CrossEntropyLoss()
|
| 1054 |
+
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
| 1055 |
+
elif self.config.problem_type == "multi_label_classification":
|
| 1056 |
+
loss_fct = BCEWithLogitsLoss()
|
| 1057 |
+
loss = loss_fct(logits, labels)
|
| 1058 |
+
|
| 1059 |
+
if not return_dict:
|
| 1060 |
+
output = (logits,) + outputs[1:]
|
| 1061 |
+
return ((loss,) + output) if loss is not None else output
|
| 1062 |
+
|
| 1063 |
+
return SequenceClassifierOutput(
|
| 1064 |
+
loss=loss,
|
| 1065 |
+
logits=logits,
|
| 1066 |
+
hidden_states=outputs.hidden_states,
|
| 1067 |
+
attentions=outputs.attentions,
|
| 1068 |
+
)
|
| 1069 |
+
|
| 1070 |
+
|
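The loss branch above infers `problem_type` from `num_labels` and the label dtype when it is not set explicitly. A minimal sketch of the three modes, again assuming the `YituTech/conv-bert-base` checkpoint; the label values are made up.

import torch
from transformers import AutoTokenizer, ConvBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
inputs = tokenizer("ConvBERT mixes self-attention with convolution.", return_tensors="pt")

# Single-label classification: integer labels -> CrossEntropyLoss
clf = ConvBertForSequenceClassification.from_pretrained("YituTech/conv-bert-base", num_labels=3)
out = clf(**inputs, labels=torch.tensor([1]))

# Regression: num_labels == 1 -> MSELoss
reg = ConvBertForSequenceClassification.from_pretrained("YituTech/conv-bert-base", num_labels=1)
out = reg(**inputs, labels=torch.tensor([0.7]))

# Multi-label classification: float multi-hot labels -> BCEWithLogitsLoss
ml = ConvBertForSequenceClassification.from_pretrained(
    "YituTech/conv-bert-base", num_labels=3, problem_type="multi_label_classification"
)
out = ml(**inputs, labels=torch.tensor([[1.0, 0.0, 1.0]]))
print(out.loss, out.logits.shape)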
| 1071 |
+
@add_start_docstrings(
|
| 1072 |
+
"""
|
| 1073 |
+
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
|
| 1074 |
+
softmax) e.g. for RocStories/SWAG tasks.
|
| 1075 |
+
""",
|
| 1076 |
+
CONVBERT_START_DOCSTRING,
|
| 1077 |
+
)
|
| 1078 |
+
class ConvBertForMultipleChoice(ConvBertPreTrainedModel):
|
| 1079 |
+
def __init__(self, config):
|
| 1080 |
+
super().__init__(config)
|
| 1081 |
+
|
| 1082 |
+
self.convbert = ConvBertModel(config)
|
| 1083 |
+
self.sequence_summary = SequenceSummary(config)
|
| 1084 |
+
self.classifier = nn.Linear(config.hidden_size, 1)
|
| 1085 |
+
|
| 1086 |
+
# Initialize weights and apply final processing
|
| 1087 |
+
self.post_init()
|
| 1088 |
+
|
| 1089 |
+
@add_start_docstrings_to_model_forward(
|
| 1090 |
+
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
|
| 1091 |
+
)
|
| 1092 |
+
@add_code_sample_docstrings(
|
| 1093 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1094 |
+
output_type=MultipleChoiceModelOutput,
|
| 1095 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1096 |
+
)
|
| 1097 |
+
def forward(
|
| 1098 |
+
self,
|
| 1099 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1100 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 1101 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
| 1102 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1103 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
| 1104 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1105 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1106 |
+
output_attentions: Optional[bool] = None,
|
| 1107 |
+
output_hidden_states: Optional[bool] = None,
|
| 1108 |
+
return_dict: Optional[bool] = None,
|
| 1109 |
+
) -> Union[Tuple, MultipleChoiceModelOutput]:
|
| 1110 |
+
r"""
|
| 1111 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1112 |
+
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
|
| 1113 |
+
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
|
| 1114 |
+
`input_ids` above)
|
| 1115 |
+
"""
|
| 1116 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1117 |
+
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
|
| 1118 |
+
|
| 1119 |
+
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
|
| 1120 |
+
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
|
| 1121 |
+
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
|
| 1122 |
+
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
|
| 1123 |
+
inputs_embeds = (
|
| 1124 |
+
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
|
| 1125 |
+
if inputs_embeds is not None
|
| 1126 |
+
else None
|
| 1127 |
+
)
|
| 1128 |
+
|
| 1129 |
+
outputs = self.convbert(
|
| 1130 |
+
input_ids,
|
| 1131 |
+
attention_mask=attention_mask,
|
| 1132 |
+
token_type_ids=token_type_ids,
|
| 1133 |
+
position_ids=position_ids,
|
| 1134 |
+
head_mask=head_mask,
|
| 1135 |
+
inputs_embeds=inputs_embeds,
|
| 1136 |
+
output_attentions=output_attentions,
|
| 1137 |
+
output_hidden_states=output_hidden_states,
|
| 1138 |
+
return_dict=return_dict,
|
| 1139 |
+
)
|
| 1140 |
+
|
| 1141 |
+
sequence_output = outputs[0]
|
| 1142 |
+
|
| 1143 |
+
pooled_output = self.sequence_summary(sequence_output)
|
| 1144 |
+
logits = self.classifier(pooled_output)
|
| 1145 |
+
reshaped_logits = logits.view(-1, num_choices)
|
| 1146 |
+
|
| 1147 |
+
loss = None
|
| 1148 |
+
if labels is not None:
|
| 1149 |
+
loss_fct = CrossEntropyLoss()
|
| 1150 |
+
loss = loss_fct(reshaped_logits, labels)
|
| 1151 |
+
|
| 1152 |
+
if not return_dict:
|
| 1153 |
+
output = (reshaped_logits,) + outputs[1:]
|
| 1154 |
+
return ((loss,) + output) if loss is not None else output
|
| 1155 |
+
|
| 1156 |
+
return MultipleChoiceModelOutput(
|
| 1157 |
+
loss=loss,
|
| 1158 |
+
logits=reshaped_logits,
|
| 1159 |
+
hidden_states=outputs.hidden_states,
|
| 1160 |
+
attentions=outputs.attentions,
|
| 1161 |
+
)
|
| 1162 |
+
|
| 1163 |
+
|
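For multiple choice, the forward pass above flattens the `(batch_size, num_choices, seq_len)` inputs to `(batch_size * num_choices, seq_len)`, scores each choice with a single-logit classifier, and reshapes back to `(batch_size, num_choices)`. A small sketch of how inputs are usually prepared; the prompt and choices are made up.

import torch
from transformers import AutoTokenizer, ConvBertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertForMultipleChoice.from_pretrained("YituTech/conv-bert-base")

prompt = "The cat sat on the"
choices = ["mat.", "cloud.", "equation."]

# Pair the prompt with every choice, then add the num_choices dimension.
enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in enc.items()}  # (1, num_choices, seq_len)

logits = model(**inputs).logits  # (1, num_choices)
print(logits.argmax(dim=-1))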
| 1164 |
+
@add_start_docstrings(
|
| 1165 |
+
"""
|
| 1166 |
+
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
|
| 1167 |
+
Named-Entity-Recognition (NER) tasks.
|
| 1168 |
+
""",
|
| 1169 |
+
CONVBERT_START_DOCSTRING,
|
| 1170 |
+
)
|
| 1171 |
+
class ConvBertForTokenClassification(ConvBertPreTrainedModel):
|
| 1172 |
+
def __init__(self, config):
|
| 1173 |
+
super().__init__(config)
|
| 1174 |
+
self.num_labels = config.num_labels
|
| 1175 |
+
|
| 1176 |
+
self.convbert = ConvBertModel(config)
|
| 1177 |
+
classifier_dropout = (
|
| 1178 |
+
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
|
| 1179 |
+
)
|
| 1180 |
+
self.dropout = nn.Dropout(classifier_dropout)
|
| 1181 |
+
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
|
| 1182 |
+
|
| 1183 |
+
# Initialize weights and apply final processing
|
| 1184 |
+
self.post_init()
|
| 1185 |
+
|
| 1186 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 1187 |
+
@add_code_sample_docstrings(
|
| 1188 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1189 |
+
output_type=TokenClassifierOutput,
|
| 1190 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1191 |
+
)
|
| 1192 |
+
def forward(
|
| 1193 |
+
self,
|
| 1194 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1195 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 1196 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
| 1197 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1198 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
| 1199 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1200 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1201 |
+
output_attentions: Optional[bool] = None,
|
| 1202 |
+
output_hidden_states: Optional[bool] = None,
|
| 1203 |
+
return_dict: Optional[bool] = None,
|
| 1204 |
+
) -> Union[Tuple, TokenClassifierOutput]:
|
| 1205 |
+
r"""
|
| 1206 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1207 |
+
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
|
| 1208 |
+
"""
|
| 1209 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1210 |
+
|
| 1211 |
+
outputs = self.convbert(
|
| 1212 |
+
input_ids,
|
| 1213 |
+
attention_mask=attention_mask,
|
| 1214 |
+
token_type_ids=token_type_ids,
|
| 1215 |
+
position_ids=position_ids,
|
| 1216 |
+
head_mask=head_mask,
|
| 1217 |
+
inputs_embeds=inputs_embeds,
|
| 1218 |
+
output_attentions=output_attentions,
|
| 1219 |
+
output_hidden_states=output_hidden_states,
|
| 1220 |
+
return_dict=return_dict,
|
| 1221 |
+
)
|
| 1222 |
+
|
| 1223 |
+
sequence_output = outputs[0]
|
| 1224 |
+
|
| 1225 |
+
sequence_output = self.dropout(sequence_output)
|
| 1226 |
+
logits = self.classifier(sequence_output)
|
| 1227 |
+
|
| 1228 |
+
loss = None
|
| 1229 |
+
if labels is not None:
|
| 1230 |
+
loss_fct = CrossEntropyLoss()
|
| 1231 |
+
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
| 1232 |
+
|
| 1233 |
+
if not return_dict:
|
| 1234 |
+
output = (logits,) + outputs[1:]
|
| 1235 |
+
return ((loss,) + output) if loss is not None else output
|
| 1236 |
+
|
| 1237 |
+
return TokenClassifierOutput(
|
| 1238 |
+
loss=loss,
|
| 1239 |
+
logits=logits,
|
| 1240 |
+
hidden_states=outputs.hidden_states,
|
| 1241 |
+
attentions=outputs.attentions,
|
| 1242 |
+
)
|
| 1243 |
+
|
| 1244 |
+
|
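For token classification the loss is a plain `CrossEntropyLoss`, whose default `ignore_index` is -100, so special tokens and padding positions are usually labelled -100 to keep them out of the loss. A minimal sketch, assuming the same base checkpoint; the label ids are illustrative.

import torch
from transformers import AutoTokenizer, ConvBertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertForTokenClassification.from_pretrained("YituTech/conv-bert-base", num_labels=5)

inputs = tokenizer("Paris is in France", return_tensors="pt")

# Mark every position -100 first, then label the real word pieces between [CLS] and [SEP].
labels = torch.full_like(inputs.input_ids, -100)
labels[0, 1:-1] = 0   # class 0 ("O") for all word pieces
labels[0, 1] = 3      # pretend the first word piece is an entity class

out = model(**inputs, labels=labels)
print(out.loss, out.logits.shape)  # logits: (1, seq_len, num_labels)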
| 1245 |
+
@add_start_docstrings(
|
| 1246 |
+
"""
|
| 1247 |
+
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
|
| 1248 |
+
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
|
| 1249 |
+
""",
|
| 1250 |
+
CONVBERT_START_DOCSTRING,
|
| 1251 |
+
)
|
| 1252 |
+
class ConvBertForQuestionAnswering(ConvBertPreTrainedModel):
|
| 1253 |
+
def __init__(self, config):
|
| 1254 |
+
super().__init__(config)
|
| 1255 |
+
|
| 1256 |
+
self.num_labels = config.num_labels
|
| 1257 |
+
self.convbert = ConvBertModel(config)
|
| 1258 |
+
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
|
| 1259 |
+
|
| 1260 |
+
# Initialize weights and apply final processing
|
| 1261 |
+
self.post_init()
|
| 1262 |
+
|
| 1263 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 1264 |
+
@add_code_sample_docstrings(
|
| 1265 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1266 |
+
output_type=QuestionAnsweringModelOutput,
|
| 1267 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1268 |
+
)
|
| 1269 |
+
def forward(
|
| 1270 |
+
self,
|
| 1271 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1272 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 1273 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
| 1274 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1275 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
| 1276 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1277 |
+
start_positions: Optional[torch.LongTensor] = None,
|
| 1278 |
+
end_positions: Optional[torch.LongTensor] = None,
|
| 1279 |
+
output_attentions: Optional[bool] = None,
|
| 1280 |
+
output_hidden_states: Optional[bool] = None,
|
| 1281 |
+
return_dict: Optional[bool] = None,
|
| 1282 |
+
) -> Union[Tuple, QuestionAnsweringModelOutput]:
|
| 1283 |
+
r"""
|
| 1284 |
+
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1285 |
+
Labels for position (index) of the start of the labelled span for computing the token classification loss.
|
| 1286 |
+
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
|
| 1287 |
+
are not taken into account for computing the loss.
|
| 1288 |
+
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1289 |
+
Labels for position (index) of the end of the labelled span for computing the token classification loss.
|
| 1290 |
+
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
|
| 1291 |
+
are not taken into account for computing the loss.
|
| 1292 |
+
"""
|
| 1293 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1294 |
+
|
| 1295 |
+
outputs = self.convbert(
|
| 1296 |
+
input_ids,
|
| 1297 |
+
attention_mask=attention_mask,
|
| 1298 |
+
token_type_ids=token_type_ids,
|
| 1299 |
+
position_ids=position_ids,
|
| 1300 |
+
head_mask=head_mask,
|
| 1301 |
+
inputs_embeds=inputs_embeds,
|
| 1302 |
+
output_attentions=output_attentions,
|
| 1303 |
+
output_hidden_states=output_hidden_states,
|
| 1304 |
+
return_dict=return_dict,
|
| 1305 |
+
)
|
| 1306 |
+
|
| 1307 |
+
sequence_output = outputs[0]
|
| 1308 |
+
|
| 1309 |
+
logits = self.qa_outputs(sequence_output)
|
| 1310 |
+
start_logits, end_logits = logits.split(1, dim=-1)
|
| 1311 |
+
start_logits = start_logits.squeeze(-1).contiguous()
|
| 1312 |
+
end_logits = end_logits.squeeze(-1).contiguous()
|
| 1313 |
+
|
| 1314 |
+
total_loss = None
|
| 1315 |
+
if start_positions is not None and end_positions is not None:
|
| 1316 |
+
# If we are on multi-GPU, the extra dimension added by the split is squeezed away
|
| 1317 |
+
if len(start_positions.size()) > 1:
|
| 1318 |
+
start_positions = start_positions.squeeze(-1)
|
| 1319 |
+
if len(end_positions.size()) > 1:
|
| 1320 |
+
end_positions = end_positions.squeeze(-1)
|
| 1321 |
+
# sometimes the start/end positions are outside our model inputs; we ignore these terms
|
| 1322 |
+
ignored_index = start_logits.size(1)
|
| 1323 |
+
start_positions = start_positions.clamp(0, ignored_index)
|
| 1324 |
+
end_positions = end_positions.clamp(0, ignored_index)
|
| 1325 |
+
|
| 1326 |
+
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
|
| 1327 |
+
start_loss = loss_fct(start_logits, start_positions)
|
| 1328 |
+
end_loss = loss_fct(end_logits, end_positions)
|
| 1329 |
+
total_loss = (start_loss + end_loss) / 2
|
| 1330 |
+
|
| 1331 |
+
if not return_dict:
|
| 1332 |
+
output = (start_logits, end_logits) + outputs[1:]
|
| 1333 |
+
return ((total_loss,) + output) if total_loss is not None else output
|
| 1334 |
+
|
| 1335 |
+
return QuestionAnsweringModelOutput(
|
| 1336 |
+
loss=total_loss,
|
| 1337 |
+
start_logits=start_logits,
|
| 1338 |
+
end_logits=end_logits,
|
| 1339 |
+
hidden_states=outputs.hidden_states,
|
| 1340 |
+
attentions=outputs.attentions,
|
| 1341 |
+
)
|
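The question-answering head above returns per-position start and end logits, and decoding a span is an argmax over each. A minimal sketch, assuming the same base checkpoint (an untuned checkpoint gives arbitrary spans; a SQuAD-fine-tuned ConvBERT checkpoint is needed for sensible answers); the question and context are made up.

import torch
from transformers import AutoTokenizer, ConvBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertForQuestionAnswering.from_pretrained("YituTech/conv-bert-base")

question = "Where is the Eiffel Tower?"
context = "The Eiffel Tower is located in Paris."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    out = model(**inputs)

# Greedy span decoding: most likely start and end positions.
start = out.start_logits.argmax(dim=-1)
end = out.end_logits.argmax(dim=-1)
answer_ids = inputs.input_ids[0, start : end + 1]
print(tokenizer.decode(answer_ids))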
deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/modeling_tf_convbert.py
ADDED
|
@@ -0,0 +1,1472 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" TF 2.0 ConvBERT model."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
from __future__ import annotations
|
| 19 |
+
|
| 20 |
+
from typing import Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import tensorflow as tf
|
| 24 |
+
|
| 25 |
+
from ...activations_tf import get_tf_activation
|
| 26 |
+
from ...modeling_tf_outputs import (
|
| 27 |
+
TFBaseModelOutput,
|
| 28 |
+
TFMaskedLMOutput,
|
| 29 |
+
TFMultipleChoiceModelOutput,
|
| 30 |
+
TFQuestionAnsweringModelOutput,
|
| 31 |
+
TFSequenceClassifierOutput,
|
| 32 |
+
TFTokenClassifierOutput,
|
| 33 |
+
)
|
| 34 |
+
from ...modeling_tf_utils import (
|
| 35 |
+
TFMaskedLanguageModelingLoss,
|
| 36 |
+
TFModelInputType,
|
| 37 |
+
TFMultipleChoiceLoss,
|
| 38 |
+
TFPreTrainedModel,
|
| 39 |
+
TFQuestionAnsweringLoss,
|
| 40 |
+
TFSequenceClassificationLoss,
|
| 41 |
+
TFSequenceSummary,
|
| 42 |
+
TFTokenClassificationLoss,
|
| 43 |
+
get_initializer,
|
| 44 |
+
keras,
|
| 45 |
+
keras_serializable,
|
| 46 |
+
unpack_inputs,
|
| 47 |
+
)
|
| 48 |
+
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
|
| 49 |
+
from ...utils import (
|
| 50 |
+
add_code_sample_docstrings,
|
| 51 |
+
add_start_docstrings,
|
| 52 |
+
add_start_docstrings_to_model_forward,
|
| 53 |
+
logging,
|
| 54 |
+
)
|
| 55 |
+
from .configuration_convbert import ConvBertConfig
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
logger = logging.get_logger(__name__)
|
| 59 |
+
|
| 60 |
+
_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
|
| 61 |
+
_CONFIG_FOR_DOC = "ConvBertConfig"
|
| 62 |
+
|
| 63 |
+
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
| 64 |
+
"YituTech/conv-bert-base",
|
| 65 |
+
"YituTech/conv-bert-medium-small",
|
| 66 |
+
"YituTech/conv-bert-small",
|
| 67 |
+
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
|
| 68 |
+
]
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
|
| 72 |
+
class TFConvBertEmbeddings(keras.layers.Layer):
|
| 73 |
+
"""Construct the embeddings from word, position and token_type embeddings."""
|
| 74 |
+
|
| 75 |
+
def __init__(self, config: ConvBertConfig, **kwargs):
|
| 76 |
+
super().__init__(**kwargs)
|
| 77 |
+
|
| 78 |
+
self.config = config
|
| 79 |
+
self.embedding_size = config.embedding_size
|
| 80 |
+
self.max_position_embeddings = config.max_position_embeddings
|
| 81 |
+
self.initializer_range = config.initializer_range
|
| 82 |
+
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
|
| 83 |
+
self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
|
| 84 |
+
|
| 85 |
+
def build(self, input_shape=None):
|
| 86 |
+
with tf.name_scope("word_embeddings"):
|
| 87 |
+
self.weight = self.add_weight(
|
| 88 |
+
name="weight",
|
| 89 |
+
shape=[self.config.vocab_size, self.embedding_size],
|
| 90 |
+
initializer=get_initializer(self.initializer_range),
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
with tf.name_scope("token_type_embeddings"):
|
| 94 |
+
self.token_type_embeddings = self.add_weight(
|
| 95 |
+
name="embeddings",
|
| 96 |
+
shape=[self.config.type_vocab_size, self.embedding_size],
|
| 97 |
+
initializer=get_initializer(self.initializer_range),
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
with tf.name_scope("position_embeddings"):
|
| 101 |
+
self.position_embeddings = self.add_weight(
|
| 102 |
+
name="embeddings",
|
| 103 |
+
shape=[self.max_position_embeddings, self.embedding_size],
|
| 104 |
+
initializer=get_initializer(self.initializer_range),
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
if self.built:
|
| 108 |
+
return
|
| 109 |
+
self.built = True
|
| 110 |
+
if getattr(self, "LayerNorm", None) is not None:
|
| 111 |
+
with tf.name_scope(self.LayerNorm.name):
|
| 112 |
+
self.LayerNorm.build([None, None, self.config.embedding_size])
|
| 113 |
+
|
| 114 |
+
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
|
| 115 |
+
def call(
|
| 116 |
+
self,
|
| 117 |
+
input_ids: tf.Tensor = None,
|
| 118 |
+
position_ids: tf.Tensor = None,
|
| 119 |
+
token_type_ids: tf.Tensor = None,
|
| 120 |
+
inputs_embeds: tf.Tensor = None,
|
| 121 |
+
past_key_values_length=0,
|
| 122 |
+
training: bool = False,
|
| 123 |
+
) -> tf.Tensor:
|
| 124 |
+
"""
|
| 125 |
+
Applies embedding based on inputs tensor.
|
| 126 |
+
|
| 127 |
+
Returns:
|
| 128 |
+
final_embeddings (`tf.Tensor`): output embedding tensor.
|
| 129 |
+
"""
|
| 130 |
+
if input_ids is None and inputs_embeds is None:
|
| 131 |
+
raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
|
| 132 |
+
|
| 133 |
+
if input_ids is not None:
|
| 134 |
+
check_embeddings_within_bounds(input_ids, self.config.vocab_size)
|
| 135 |
+
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
|
| 136 |
+
|
| 137 |
+
input_shape = shape_list(inputs_embeds)[:-1]
|
| 138 |
+
|
| 139 |
+
if token_type_ids is None:
|
| 140 |
+
token_type_ids = tf.fill(dims=input_shape, value=0)
|
| 141 |
+
|
| 142 |
+
if position_ids is None:
|
| 143 |
+
position_ids = tf.expand_dims(
|
| 144 |
+
tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
|
| 145 |
+
)
|
| 146 |
+
|
| 147 |
+
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
|
| 148 |
+
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
|
| 149 |
+
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
|
| 150 |
+
final_embeddings = self.LayerNorm(inputs=final_embeddings)
|
| 151 |
+
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
|
| 152 |
+
|
| 153 |
+
return final_embeddings
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
class TFConvBertSelfAttention(keras.layers.Layer):
|
| 157 |
+
def __init__(self, config, **kwargs):
|
| 158 |
+
super().__init__(**kwargs)
|
| 159 |
+
|
| 160 |
+
if config.hidden_size % config.num_attention_heads != 0:
|
| 161 |
+
raise ValueError(
|
| 162 |
+
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
|
| 163 |
+
f"heads ({config.num_attention_heads})"
|
| 164 |
+
)
|
| 165 |
+
|
| 166 |
+
new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
|
| 167 |
+
if new_num_attention_heads < 1:
|
| 168 |
+
self.head_ratio = config.num_attention_heads
|
| 169 |
+
num_attention_heads = 1
|
| 170 |
+
else:
|
| 171 |
+
num_attention_heads = new_num_attention_heads
|
| 172 |
+
self.head_ratio = config.head_ratio
|
| 173 |
+
|
| 174 |
+
self.num_attention_heads = num_attention_heads
|
| 175 |
+
self.conv_kernel_size = config.conv_kernel_size
|
| 176 |
+
|
| 177 |
+
if config.hidden_size % self.num_attention_heads != 0:
|
| 178 |
+
raise ValueError("hidden_size should be divisible by num_attention_heads")
|
| 179 |
+
|
| 180 |
+
self.attention_head_size = config.hidden_size // config.num_attention_heads
|
| 181 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
| 182 |
+
self.query = keras.layers.Dense(
|
| 183 |
+
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
|
| 184 |
+
)
|
| 185 |
+
self.key = keras.layers.Dense(
|
| 186 |
+
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
|
| 187 |
+
)
|
| 188 |
+
self.value = keras.layers.Dense(
|
| 189 |
+
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
self.key_conv_attn_layer = keras.layers.SeparableConv1D(
|
| 193 |
+
self.all_head_size,
|
| 194 |
+
self.conv_kernel_size,
|
| 195 |
+
padding="same",
|
| 196 |
+
activation=None,
|
| 197 |
+
depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
|
| 198 |
+
pointwise_initializer=get_initializer(config.initializer_range),
|
| 199 |
+
name="key_conv_attn_layer",
|
| 200 |
+
)
|
| 201 |
+
|
| 202 |
+
self.conv_kernel_layer = keras.layers.Dense(
|
| 203 |
+
self.num_attention_heads * self.conv_kernel_size,
|
| 204 |
+
activation=None,
|
| 205 |
+
name="conv_kernel_layer",
|
| 206 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
self.conv_out_layer = keras.layers.Dense(
|
| 210 |
+
self.all_head_size,
|
| 211 |
+
activation=None,
|
| 212 |
+
name="conv_out_layer",
|
| 213 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 214 |
+
)
|
| 215 |
+
|
| 216 |
+
self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
|
| 217 |
+
self.config = config
|
| 218 |
+
|
| 219 |
+
def transpose_for_scores(self, x, batch_size):
|
| 220 |
+
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
|
| 221 |
+
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
|
| 222 |
+
return tf.transpose(x, perm=[0, 2, 1, 3])
|
| 223 |
+
|
| 224 |
+
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
|
| 225 |
+
batch_size = shape_list(hidden_states)[0]
|
| 226 |
+
mixed_query_layer = self.query(hidden_states)
|
| 227 |
+
mixed_key_layer = self.key(hidden_states)
|
| 228 |
+
mixed_value_layer = self.value(hidden_states)
|
| 229 |
+
|
| 230 |
+
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
|
| 231 |
+
|
| 232 |
+
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
|
| 233 |
+
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
|
| 234 |
+
conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
|
| 235 |
+
|
| 236 |
+
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
|
| 237 |
+
conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
|
| 238 |
+
conv_kernel_layer = stable_softmax(conv_kernel_layer, axis=1)
|
| 239 |
+
|
| 240 |
+
paddings = tf.constant(
|
| 241 |
+
[
|
| 242 |
+
[
|
| 243 |
+
0,
|
| 244 |
+
0,
|
| 245 |
+
],
|
| 246 |
+
[int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
|
| 247 |
+
[0, 0],
|
| 248 |
+
]
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
conv_out_layer = self.conv_out_layer(hidden_states)
|
| 252 |
+
conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
|
| 253 |
+
conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
|
| 254 |
+
|
| 255 |
+
unfold_conv_out_layer = tf.stack(
|
| 256 |
+
[
|
| 257 |
+
tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
|
| 258 |
+
for i in range(self.conv_kernel_size)
|
| 259 |
+
],
|
| 260 |
+
axis=-1,
|
| 261 |
+
)
|
| 262 |
+
|
| 263 |
+
conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
|
| 264 |
+
|
| 265 |
+
conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
|
| 266 |
+
conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
|
| 267 |
+
|
| 268 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
| 269 |
+
attention_scores = tf.matmul(
|
| 270 |
+
query_layer, key_layer, transpose_b=True
|
| 271 |
+
) # (batch size, num_heads, seq_len_q, seq_len_k)
|
| 272 |
+
dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores
|
| 273 |
+
attention_scores = attention_scores / tf.math.sqrt(dk)
|
| 274 |
+
|
| 275 |
+
if attention_mask is not None:
|
| 276 |
+
# Apply the attention mask (precomputed for all layers in the TFConvBertModel call() function)
|
| 277 |
+
attention_scores = attention_scores + attention_mask
|
| 278 |
+
|
| 279 |
+
# Normalize the attention scores to probabilities.
|
| 280 |
+
attention_probs = stable_softmax(attention_scores, axis=-1)
|
| 281 |
+
|
| 282 |
+
# This is actually dropping out entire tokens to attend to, which might
|
| 283 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
| 284 |
+
attention_probs = self.dropout(attention_probs, training=training)
|
| 285 |
+
|
| 286 |
+
# Mask heads if we want to
|
| 287 |
+
if head_mask is not None:
|
| 288 |
+
attention_probs = attention_probs * head_mask
|
| 289 |
+
|
| 290 |
+
value_layer = tf.reshape(
|
| 291 |
+
mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
|
| 292 |
+
)
|
| 293 |
+
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
|
| 294 |
+
|
| 295 |
+
context_layer = tf.matmul(attention_probs, value_layer)
|
| 296 |
+
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
|
| 297 |
+
|
| 298 |
+
conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
|
| 299 |
+
context_layer = tf.concat([context_layer, conv_out], 2)
|
| 300 |
+
context_layer = tf.reshape(
|
| 301 |
+
context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
|
| 302 |
+
) # (batch_size, seq_len_q, all_head_size)
|
| 303 |
+
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
| 304 |
+
|
| 305 |
+
return outputs
|
| 306 |
+
|
| 307 |
+
def build(self, input_shape=None):
|
| 308 |
+
if self.built:
|
| 309 |
+
return
|
| 310 |
+
self.built = True
|
| 311 |
+
if getattr(self, "query", None) is not None:
|
| 312 |
+
with tf.name_scope(self.query.name):
|
| 313 |
+
self.query.build([None, None, self.config.hidden_size])
|
| 314 |
+
if getattr(self, "key", None) is not None:
|
| 315 |
+
with tf.name_scope(self.key.name):
|
| 316 |
+
self.key.build([None, None, self.config.hidden_size])
|
| 317 |
+
if getattr(self, "value", None) is not None:
|
| 318 |
+
with tf.name_scope(self.value.name):
|
| 319 |
+
self.value.build([None, None, self.config.hidden_size])
|
| 320 |
+
if getattr(self, "key_conv_attn_layer", None) is not None:
|
| 321 |
+
with tf.name_scope(self.key_conv_attn_layer.name):
|
| 322 |
+
self.key_conv_attn_layer.build([None, None, self.config.hidden_size])
|
| 323 |
+
if getattr(self, "conv_kernel_layer", None) is not None:
|
| 324 |
+
with tf.name_scope(self.conv_kernel_layer.name):
|
| 325 |
+
self.conv_kernel_layer.build([None, None, self.all_head_size])
|
| 326 |
+
if getattr(self, "conv_out_layer", None) is not None:
|
| 327 |
+
with tf.name_scope(self.conv_out_layer.name):
|
| 328 |
+
self.conv_out_layer.build([None, None, self.config.hidden_size])
|
| 329 |
+
|
| 330 |
+
|
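The layer above implements ConvBERT's span-based dynamic convolution: for each position it predicts a softmax-normalised kernel of length `conv_kernel_size` (from the element-wise product of the query projection and a depthwise-separable convolution over the hidden states), and uses that kernel to mix the `conv_out_layer` values in a local window, alongside the reduced set of ordinary attention heads (`head_ratio` controls the split). A NumPy sketch of the mixing step only, with made-up shapes, to show what the pad/unfold/matmul sequence computes:

import numpy as np

seq_len, head_size, kernel_size = 6, 4, 3
values = np.random.randn(seq_len, head_size)       # conv_out_layer for one head
kernels = np.random.rand(seq_len, kernel_size)     # per-position dynamic kernels
kernels /= kernels.sum(axis=1, keepdims=True)      # stand-in for the layer's softmax over kernel positions

pad = (kernel_size - 1) // 2
padded = np.pad(values, ((pad, pad), (0, 0)))      # same "CONSTANT" padding as the tf.pad call

out = np.zeros_like(values)
for i in range(seq_len):
    window = padded[i : i + kernel_size]           # the unfolded local span around position i
    out[i] = kernels[i] @ window                   # position-specific weighted sum of neighbours

print(out.shape)  # (seq_len, head_size); concatenated with the attention output downstream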
| 331 |
+
class TFConvBertSelfOutput(keras.layers.Layer):
|
| 332 |
+
def __init__(self, config, **kwargs):
|
| 333 |
+
super().__init__(**kwargs)
|
| 334 |
+
|
| 335 |
+
self.dense = keras.layers.Dense(
|
| 336 |
+
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
|
| 337 |
+
)
|
| 338 |
+
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
|
| 339 |
+
self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
|
| 340 |
+
self.config = config
|
| 341 |
+
|
| 342 |
+
def call(self, hidden_states, input_tensor, training=False):
|
| 343 |
+
hidden_states = self.dense(hidden_states)
|
| 344 |
+
hidden_states = self.dropout(hidden_states, training=training)
|
| 345 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
| 346 |
+
|
| 347 |
+
return hidden_states
|
| 348 |
+
|
| 349 |
+
def build(self, input_shape=None):
|
| 350 |
+
if self.built:
|
| 351 |
+
return
|
| 352 |
+
self.built = True
|
| 353 |
+
if getattr(self, "dense", None) is not None:
|
| 354 |
+
with tf.name_scope(self.dense.name):
|
| 355 |
+
self.dense.build([None, None, self.config.hidden_size])
|
| 356 |
+
if getattr(self, "LayerNorm", None) is not None:
|
| 357 |
+
with tf.name_scope(self.LayerNorm.name):
|
| 358 |
+
self.LayerNorm.build([None, None, self.config.hidden_size])
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
class TFConvBertAttention(keras.layers.Layer):
|
| 362 |
+
def __init__(self, config, **kwargs):
|
| 363 |
+
super().__init__(**kwargs)
|
| 364 |
+
|
| 365 |
+
self.self_attention = TFConvBertSelfAttention(config, name="self")
|
| 366 |
+
self.dense_output = TFConvBertSelfOutput(config, name="output")
|
| 367 |
+
|
| 368 |
+
def prune_heads(self, heads):
|
| 369 |
+
raise NotImplementedError
|
| 370 |
+
|
| 371 |
+
def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
|
| 372 |
+
self_outputs = self.self_attention(
|
| 373 |
+
input_tensor, attention_mask, head_mask, output_attentions, training=training
|
| 374 |
+
)
|
| 375 |
+
attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
|
| 376 |
+
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
| 377 |
+
|
| 378 |
+
return outputs
|
| 379 |
+
|
| 380 |
+
def build(self, input_shape=None):
|
| 381 |
+
if self.built:
|
| 382 |
+
return
|
| 383 |
+
self.built = True
|
| 384 |
+
if getattr(self, "self_attention", None) is not None:
|
| 385 |
+
with tf.name_scope(self.self_attention.name):
|
| 386 |
+
self.self_attention.build(None)
|
| 387 |
+
if getattr(self, "dense_output", None) is not None:
|
| 388 |
+
with tf.name_scope(self.dense_output.name):
|
| 389 |
+
self.dense_output.build(None)
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
class GroupedLinearLayer(keras.layers.Layer):
|
| 393 |
+
def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
|
| 394 |
+
super().__init__(**kwargs)
|
| 395 |
+
self.input_size = input_size
|
| 396 |
+
self.output_size = output_size
|
| 397 |
+
self.num_groups = num_groups
|
| 398 |
+
self.kernel_initializer = kernel_initializer
|
| 399 |
+
self.group_in_dim = self.input_size // self.num_groups
|
| 400 |
+
self.group_out_dim = self.output_size // self.num_groups
|
| 401 |
+
|
| 402 |
+
def build(self, input_shape=None):
|
| 403 |
+
self.kernel = self.add_weight(
|
| 404 |
+
"kernel",
|
| 405 |
+
shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
|
| 406 |
+
initializer=self.kernel_initializer,
|
| 407 |
+
trainable=True,
|
| 408 |
+
)
|
| 409 |
+
|
| 410 |
+
self.bias = self.add_weight(
|
| 411 |
+
"bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
|
| 412 |
+
)
|
| 413 |
+
super().build(input_shape)
|
| 414 |
+
|
| 415 |
+
def call(self, hidden_states):
|
| 416 |
+
batch_size = shape_list(hidden_states)[0]
|
| 417 |
+
x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
|
| 418 |
+
x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
|
| 419 |
+
x = tf.transpose(x, [1, 0, 2])
|
| 420 |
+
x = tf.reshape(x, [batch_size, -1, self.output_size])
|
| 421 |
+
x = tf.nn.bias_add(value=x, bias=self.bias)
|
| 422 |
+
return x
|
| 423 |
+
|
| 424 |
+
|
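GroupedLinearLayer splits the features into `num_groups` groups and applies an independent dense projection to each, which is equivalent to multiplying by a block-diagonal weight matrix and cuts the parameter count by roughly a factor of `num_groups`. A small NumPy sketch of that equivalence, with illustrative shapes and the bias term omitted:

import numpy as np

input_size, output_size, num_groups = 8, 8, 2
group_in, group_out = input_size // num_groups, output_size // num_groups

x = np.random.randn(5, input_size)                    # (tokens, input_size)
w = np.random.randn(num_groups, group_in, group_out)  # one weight block per group

# Grouped projection: each group of input features gets its own dense layer.
x_groups = x.reshape(5, num_groups, group_in)
out = np.einsum("tgi,gio->tgo", x_groups, w).reshape(5, output_size)

# Same result as a single matmul with a block-diagonal weight matrix (bias omitted).
block_diag = np.zeros((input_size, output_size))
for g in range(num_groups):
    block_diag[g * group_in:(g + 1) * group_in, g * group_out:(g + 1) * group_out] = w[g]
assert np.allclose(out, x @ block_diag)

print(out.shape)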
| 425 |
+
class TFConvBertIntermediate(keras.layers.Layer):
|
| 426 |
+
def __init__(self, config, **kwargs):
|
| 427 |
+
super().__init__(**kwargs)
|
| 428 |
+
if config.num_groups == 1:
|
| 429 |
+
self.dense = keras.layers.Dense(
|
| 430 |
+
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
|
| 431 |
+
)
|
| 432 |
+
else:
|
| 433 |
+
self.dense = GroupedLinearLayer(
|
| 434 |
+
config.hidden_size,
|
| 435 |
+
config.intermediate_size,
|
| 436 |
+
num_groups=config.num_groups,
|
| 437 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 438 |
+
name="dense",
|
| 439 |
+
)
|
| 440 |
+
|
| 441 |
+
if isinstance(config.hidden_act, str):
|
| 442 |
+
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
|
| 443 |
+
else:
|
| 444 |
+
self.intermediate_act_fn = config.hidden_act
|
| 445 |
+
self.config = config
|
| 446 |
+
|
| 447 |
+
def call(self, hidden_states):
|
| 448 |
+
hidden_states = self.dense(hidden_states)
|
| 449 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
| 450 |
+
|
| 451 |
+
return hidden_states
|
| 452 |
+
|
| 453 |
+
def build(self, input_shape=None):
|
| 454 |
+
if self.built:
|
| 455 |
+
return
|
| 456 |
+
self.built = True
|
| 457 |
+
if getattr(self, "dense", None) is not None:
|
| 458 |
+
with tf.name_scope(self.dense.name):
|
| 459 |
+
self.dense.build([None, None, self.config.hidden_size])
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
class TFConvBertOutput(keras.layers.Layer):
|
| 463 |
+
def __init__(self, config, **kwargs):
|
| 464 |
+
super().__init__(**kwargs)
|
| 465 |
+
|
| 466 |
+
if config.num_groups == 1:
|
| 467 |
+
self.dense = keras.layers.Dense(
|
| 468 |
+
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
|
| 469 |
+
)
|
| 470 |
+
else:
|
| 471 |
+
self.dense = GroupedLinearLayer(
|
| 472 |
+
config.intermediate_size,
|
| 473 |
+
config.hidden_size,
|
| 474 |
+
num_groups=config.num_groups,
|
| 475 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
| 476 |
+
name="dense",
|
| 477 |
+
)
|
| 478 |
+
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
|
| 479 |
+
self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
|
| 480 |
+
self.config = config
|
| 481 |
+
|
| 482 |
+
def call(self, hidden_states, input_tensor, training=False):
|
| 483 |
+
hidden_states = self.dense(hidden_states)
|
| 484 |
+
hidden_states = self.dropout(hidden_states, training=training)
|
| 485 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
| 486 |
+
|
| 487 |
+
return hidden_states
|
| 488 |
+
|
| 489 |
+
def build(self, input_shape=None):
|
| 490 |
+
if self.built:
|
| 491 |
+
return
|
| 492 |
+
self.built = True
|
| 493 |
+
if getattr(self, "LayerNorm", None) is not None:
|
| 494 |
+
with tf.name_scope(self.LayerNorm.name):
|
| 495 |
+
self.LayerNorm.build([None, None, self.config.hidden_size])
|
| 496 |
+
if getattr(self, "dense", None) is not None:
|
| 497 |
+
with tf.name_scope(self.dense.name):
|
| 498 |
+
self.dense.build([None, None, self.config.intermediate_size])
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
class TFConvBertLayer(keras.layers.Layer):
|
| 502 |
+
def __init__(self, config, **kwargs):
|
| 503 |
+
super().__init__(**kwargs)
|
| 504 |
+
|
| 505 |
+
self.attention = TFConvBertAttention(config, name="attention")
|
| 506 |
+
self.intermediate = TFConvBertIntermediate(config, name="intermediate")
|
| 507 |
+
self.bert_output = TFConvBertOutput(config, name="output")
|
| 508 |
+
|
| 509 |
+
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
|
| 510 |
+
attention_outputs = self.attention(
|
| 511 |
+
hidden_states, attention_mask, head_mask, output_attentions, training=training
|
| 512 |
+
)
|
| 513 |
+
attention_output = attention_outputs[0]
|
| 514 |
+
intermediate_output = self.intermediate(attention_output)
|
| 515 |
+
layer_output = self.bert_output(intermediate_output, attention_output, training=training)
|
| 516 |
+
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
|
| 517 |
+
|
| 518 |
+
return outputs
|
| 519 |
+
|
| 520 |
+
def build(self, input_shape=None):
|
| 521 |
+
if self.built:
|
| 522 |
+
return
|
| 523 |
+
self.built = True
|
| 524 |
+
if getattr(self, "attention", None) is not None:
|
| 525 |
+
with tf.name_scope(self.attention.name):
|
| 526 |
+
self.attention.build(None)
|
| 527 |
+
if getattr(self, "intermediate", None) is not None:
|
| 528 |
+
with tf.name_scope(self.intermediate.name):
|
| 529 |
+
self.intermediate.build(None)
|
| 530 |
+
if getattr(self, "bert_output", None) is not None:
|
| 531 |
+
with tf.name_scope(self.bert_output.name):
|
| 532 |
+
self.bert_output.build(None)
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
class TFConvBertEncoder(keras.layers.Layer):
|
| 536 |
+
def __init__(self, config, **kwargs):
|
| 537 |
+
super().__init__(**kwargs)
|
| 538 |
+
|
| 539 |
+
self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
|
| 540 |
+
|
| 541 |
+
def call(
|
| 542 |
+
self,
|
| 543 |
+
hidden_states,
|
| 544 |
+
attention_mask,
|
| 545 |
+
head_mask,
|
| 546 |
+
output_attentions,
|
| 547 |
+
output_hidden_states,
|
| 548 |
+
return_dict,
|
| 549 |
+
training=False,
|
| 550 |
+
):
|
| 551 |
+
all_hidden_states = () if output_hidden_states else None
|
| 552 |
+
all_attentions = () if output_attentions else None
|
| 553 |
+
|
| 554 |
+
for i, layer_module in enumerate(self.layer):
|
| 555 |
+
if output_hidden_states:
|
| 556 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 557 |
+
|
| 558 |
+
layer_outputs = layer_module(
|
| 559 |
+
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
|
| 560 |
+
)
|
| 561 |
+
hidden_states = layer_outputs[0]
|
| 562 |
+
|
| 563 |
+
if output_attentions:
|
| 564 |
+
all_attentions = all_attentions + (layer_outputs[1],)
|
| 565 |
+
|
| 566 |
+
# Add last layer
|
| 567 |
+
if output_hidden_states:
|
| 568 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 569 |
+
|
| 570 |
+
if not return_dict:
|
| 571 |
+
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
|
| 572 |
+
|
| 573 |
+
return TFBaseModelOutput(
|
| 574 |
+
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
|
| 575 |
+
)
|
| 576 |
+
|
| 577 |
+
def build(self, input_shape=None):
|
| 578 |
+
if self.built:
|
| 579 |
+
return
|
| 580 |
+
self.built = True
|
| 581 |
+
if getattr(self, "layer", None) is not None:
|
| 582 |
+
for layer in self.layer:
|
| 583 |
+
with tf.name_scope(layer.name):
|
| 584 |
+
layer.build(None)
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
class TFConvBertPredictionHeadTransform(keras.layers.Layer):
|
| 588 |
+
def __init__(self, config, **kwargs):
|
| 589 |
+
super().__init__(**kwargs)
|
| 590 |
+
|
| 591 |
+
self.dense = keras.layers.Dense(
|
| 592 |
+
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
|
| 593 |
+
)
|
| 594 |
+
|
| 595 |
+
if isinstance(config.hidden_act, str):
|
| 596 |
+
self.transform_act_fn = get_tf_activation(config.hidden_act)
|
| 597 |
+
else:
|
| 598 |
+
self.transform_act_fn = config.hidden_act
|
| 599 |
+
|
| 600 |
+
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
|
| 601 |
+
self.config = config
|
| 602 |
+
|
| 603 |
+
def call(self, hidden_states):
|
| 604 |
+
hidden_states = self.dense(hidden_states)
|
| 605 |
+
hidden_states = self.transform_act_fn(hidden_states)
|
| 606 |
+
hidden_states = self.LayerNorm(hidden_states)
|
| 607 |
+
|
| 608 |
+
return hidden_states
|
| 609 |
+
|
| 610 |
+
def build(self, input_shape=None):
|
| 611 |
+
if self.built:
|
| 612 |
+
return
|
| 613 |
+
self.built = True
|
| 614 |
+
if getattr(self, "dense", None) is not None:
|
| 615 |
+
with tf.name_scope(self.dense.name):
|
| 616 |
+
self.dense.build([None, None, self.config.hidden_size])
|
| 617 |
+
if getattr(self, "LayerNorm", None) is not None:
|
| 618 |
+
with tf.name_scope(self.LayerNorm.name):
|
| 619 |
+
self.LayerNorm.build([None, None, self.config.hidden_size])
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
@keras_serializable
|
| 623 |
+
class TFConvBertMainLayer(keras.layers.Layer):
|
| 624 |
+
config_class = ConvBertConfig
|
| 625 |
+
|
| 626 |
+
def __init__(self, config, **kwargs):
|
| 627 |
+
super().__init__(**kwargs)
|
| 628 |
+
|
| 629 |
+
self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
|
| 630 |
+
|
| 631 |
+
if config.embedding_size != config.hidden_size:
|
| 632 |
+
self.embeddings_project = keras.layers.Dense(config.hidden_size, name="embeddings_project")
|
| 633 |
+
|
| 634 |
+
self.encoder = TFConvBertEncoder(config, name="encoder")
|
| 635 |
+
self.config = config
|
| 636 |
+
|
| 637 |
+
def get_input_embeddings(self):
|
| 638 |
+
return self.embeddings
|
| 639 |
+
|
| 640 |
+
def set_input_embeddings(self, value):
|
| 641 |
+
self.embeddings.weight = value
|
| 642 |
+
self.embeddings.vocab_size = value.shape[0]
|
| 643 |
+
|
| 644 |
+
def _prune_heads(self, heads_to_prune):
|
| 645 |
+
"""
|
| 646 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
|
| 647 |
+
class PreTrainedModel
|
| 648 |
+
"""
|
| 649 |
+
raise NotImplementedError
|
| 650 |
+
|
| 651 |
+
def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
|
| 652 |
+
if attention_mask is None:
|
| 653 |
+
attention_mask = tf.fill(input_shape, 1)
|
| 654 |
+
|
| 655 |
+
# We create a 3D attention mask from a 2D tensor mask.
|
| 656 |
+
# Sizes are [batch_size, 1, 1, to_seq_length]
|
| 657 |
+
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
|
| 658 |
+
# this attention mask is more simple than the triangular masking of causal attention
|
| 659 |
+
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
|
| 660 |
+
extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
|
| 661 |
+
|
| 662 |
+
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
| 663 |
+
# masked positions, this operation will create a tensor which is 0.0 for
|
| 664 |
+
# positions we want to attend and -10000.0 for masked positions.
|
| 665 |
+
# Since we are adding it to the raw scores before the softmax, this is
|
| 666 |
+
# effectively the same as removing these entirely.
|
| 667 |
+
extended_attention_mask = tf.cast(extended_attention_mask, dtype)
|
| 668 |
+
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
| 669 |
+
|
| 670 |
+
return extended_attention_mask
|
| 671 |
+
|
| 672 |
+
def get_head_mask(self, head_mask):
|
| 673 |
+
if head_mask is not None:
|
| 674 |
+
raise NotImplementedError
|
| 675 |
+
else:
|
| 676 |
+
head_mask = [None] * self.config.num_hidden_layers
|
| 677 |
+
|
| 678 |
+
return head_mask
|
| 679 |
+
|
| 680 |
+
@unpack_inputs
|
| 681 |
+
def call(
|
| 682 |
+
self,
|
| 683 |
+
input_ids=None,
|
| 684 |
+
attention_mask=None,
|
| 685 |
+
token_type_ids=None,
|
| 686 |
+
position_ids=None,
|
| 687 |
+
head_mask=None,
|
| 688 |
+
inputs_embeds=None,
|
| 689 |
+
output_attentions=None,
|
| 690 |
+
output_hidden_states=None,
|
| 691 |
+
return_dict=None,
|
| 692 |
+
training=False,
|
| 693 |
+
):
|
| 694 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 695 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
| 696 |
+
elif input_ids is not None:
|
| 697 |
+
input_shape = shape_list(input_ids)
|
| 698 |
+
elif inputs_embeds is not None:
|
| 699 |
+
input_shape = shape_list(inputs_embeds)[:-1]
|
| 700 |
+
else:
|
| 701 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
| 702 |
+
|
| 703 |
+
if attention_mask is None:
|
| 704 |
+
attention_mask = tf.fill(input_shape, 1)
|
| 705 |
+
|
| 706 |
+
if token_type_ids is None:
|
| 707 |
+
token_type_ids = tf.fill(input_shape, 0)
|
| 708 |
+
|
| 709 |
+
hidden_states = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)
|
| 710 |
+
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype)
|
| 711 |
+
head_mask = self.get_head_mask(head_mask)
|
| 712 |
+
|
| 713 |
+
if hasattr(self, "embeddings_project"):
|
| 714 |
+
hidden_states = self.embeddings_project(hidden_states, training=training)
|
| 715 |
+
|
| 716 |
+
hidden_states = self.encoder(
|
| 717 |
+
hidden_states,
|
| 718 |
+
extended_attention_mask,
|
| 719 |
+
head_mask,
|
| 720 |
+
output_attentions,
|
| 721 |
+
output_hidden_states,
|
| 722 |
+
return_dict,
|
| 723 |
+
training=training,
|
| 724 |
+
)
|
| 725 |
+
|
| 726 |
+
return hidden_states
|
| 727 |
+
|
| 728 |
+
def build(self, input_shape=None):
|
| 729 |
+
if self.built:
|
| 730 |
+
return
|
| 731 |
+
self.built = True
|
| 732 |
+
if getattr(self, "embeddings", None) is not None:
|
| 733 |
+
with tf.name_scope(self.embeddings.name):
|
| 734 |
+
self.embeddings.build(None)
|
| 735 |
+
if getattr(self, "encoder", None) is not None:
|
| 736 |
+
with tf.name_scope(self.encoder.name):
|
| 737 |
+
self.encoder.build(None)
|
| 738 |
+
if getattr(self, "embeddings_project", None) is not None:
|
| 739 |
+
with tf.name_scope(self.embeddings_project.name):
|
| 740 |
+
self.embeddings_project.build([None, None, self.config.embedding_size])
|
| 741 |
+
|
| 742 |
+
|
| 743 |
+
class TFConvBertPreTrainedModel(TFPreTrainedModel):
|
| 744 |
+
"""
|
| 745 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 746 |
+
models.
|
| 747 |
+
"""
|
| 748 |
+
|
| 749 |
+
config_class = ConvBertConfig
|
| 750 |
+
base_model_prefix = "convbert"
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
CONVBERT_START_DOCSTRING = r"""
|
| 754 |
+
|
| 755 |
+
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 756 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 757 |
+
etc.)
|
| 758 |
+
|
| 759 |
+
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
|
| 760 |
+
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
|
| 761 |
+
behavior.
|
| 762 |
+
|
| 763 |
+
<Tip>
|
| 764 |
+
|
| 765 |
+
TensorFlow models and layers in `transformers` accept two formats as input:
|
| 766 |
+
|
| 767 |
+
- having all inputs as keyword arguments (like PyTorch models), or
|
| 768 |
+
- having all inputs as a list, tuple or dict in the first positional argument.
|
| 769 |
+
|
| 770 |
+
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
|
| 771 |
+
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
|
| 772 |
+
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
|
| 773 |
+
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
|
| 774 |
+
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
|
| 775 |
+
positional argument:
|
| 776 |
+
|
| 777 |
+
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
|
| 778 |
+
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
|
| 779 |
+
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
|
| 780 |
+
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
|
| 781 |
+
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
|
| 782 |
+
|
| 783 |
+
Note that when creating models and layers with
|
| 784 |
+
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
|
| 785 |
+
about any of this, as you can just pass inputs like you would to any other Python function!
|
| 786 |
+
|
| 787 |
+
</Tip>
|
| 788 |
+
|
| 789 |
+
Args:
|
| 790 |
+
config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
|
| 791 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 792 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 793 |
+
"""
|
| 794 |
+
|
| 795 |
+
CONVBERT_INPUTS_DOCSTRING = r"""
|
| 796 |
+
Args:
|
| 797 |
+
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
|
| 798 |
+
Indices of input sequence tokens in the vocabulary.
|
| 799 |
+
|
| 800 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
|
| 801 |
+
[`PreTrainedTokenizer.encode`] for details.
|
| 802 |
+
|
| 803 |
+
[What are input IDs?](../glossary#input-ids)
|
| 804 |
+
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
|
| 805 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 806 |
+
|
| 807 |
+
- 1 for tokens that are **not masked**,
|
| 808 |
+
- 0 for tokens that are **masked**.
|
| 809 |
+
|
| 810 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 811 |
+
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
|
| 812 |
+
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
|
| 813 |
+
1]`:
|
| 814 |
+
|
| 815 |
+
- 0 corresponds to a *sentence A* token,
|
| 816 |
+
- 1 corresponds to a *sentence B* token.
|
| 817 |
+
|
| 818 |
+
[What are token type IDs?](../glossary#token-type-ids)
|
| 819 |
+
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
|
| 820 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 821 |
+
config.max_position_embeddings - 1]`.
|
| 822 |
+
|
| 823 |
+
[What are position IDs?](../glossary#position-ids)
|
| 824 |
+
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 825 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 826 |
+
|
| 827 |
+
- 1 indicates the head is **not masked**,
|
| 828 |
+
- 0 indicates the head is **masked**.
|
| 829 |
+
|
| 830 |
+
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
|
| 831 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 832 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 833 |
+
model's internal embedding lookup matrix.
|
| 834 |
+
output_attentions (`bool`, *optional*):
|
| 835 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 836 |
+
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
|
| 837 |
+
config will be used instead.
|
| 838 |
+
output_hidden_states (`bool`, *optional*):
|
| 839 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 840 |
+
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
|
| 841 |
+
used instead.
|
| 842 |
+
return_dict (`bool`, *optional*):
|
| 843 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
|
| 844 |
+
eager mode, in graph mode the value will always be set to True.
|
| 845 |
+
training (`bool`, *optional*, defaults to `False`):
|
| 846 |
+
Whether or not to use the model in training mode (some modules like dropout modules have different
|
| 847 |
+
behaviors between training and evaluation).
|
| 848 |
+
"""
|
| 849 |
+
|
| 850 |
+
|
| 851 |
+
@add_start_docstrings(
|
| 852 |
+
"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
|
| 853 |
+
CONVBERT_START_DOCSTRING,
|
| 854 |
+
)
|
| 855 |
+
class TFConvBertModel(TFConvBertPreTrainedModel):
|
| 856 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 857 |
+
super().__init__(config, *inputs, **kwargs)
|
| 858 |
+
|
| 859 |
+
self.convbert = TFConvBertMainLayer(config, name="convbert")
|
| 860 |
+
|
| 861 |
+
@unpack_inputs
|
| 862 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 863 |
+
@add_code_sample_docstrings(
|
| 864 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 865 |
+
output_type=TFBaseModelOutput,
|
| 866 |
+
config_class=_CONFIG_FOR_DOC,
|
| 867 |
+
)
|
| 868 |
+
def call(
|
| 869 |
+
self,
|
| 870 |
+
input_ids: TFModelInputType | None = None,
|
| 871 |
+
attention_mask: Optional[Union[np.array, tf.Tensor]] = None,
|
| 872 |
+
token_type_ids: Optional[Union[np.array, tf.Tensor]] = None,
|
| 873 |
+
position_ids: Optional[Union[np.array, tf.Tensor]] = None,
|
| 874 |
+
head_mask: Optional[Union[np.array, tf.Tensor]] = None,
|
| 875 |
+
inputs_embeds: tf.Tensor | None = None,
|
| 876 |
+
output_attentions: Optional[bool] = None,
|
| 877 |
+
output_hidden_states: Optional[bool] = None,
|
| 878 |
+
return_dict: Optional[bool] = None,
|
| 879 |
+
training: bool = False,
|
| 880 |
+
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
|
| 881 |
+
outputs = self.convbert(
|
| 882 |
+
input_ids=input_ids,
|
| 883 |
+
attention_mask=attention_mask,
|
| 884 |
+
token_type_ids=token_type_ids,
|
| 885 |
+
position_ids=position_ids,
|
| 886 |
+
head_mask=head_mask,
|
| 887 |
+
inputs_embeds=inputs_embeds,
|
| 888 |
+
output_attentions=output_attentions,
|
| 889 |
+
output_hidden_states=output_hidden_states,
|
| 890 |
+
return_dict=return_dict,
|
| 891 |
+
training=training,
|
| 892 |
+
)
|
| 893 |
+
|
| 894 |
+
return outputs
|
| 895 |
+
|
| 896 |
+
def build(self, input_shape=None):
|
| 897 |
+
if self.built:
|
| 898 |
+
return
|
| 899 |
+
self.built = True
|
| 900 |
+
if getattr(self, "convbert", None) is not None:
|
| 901 |
+
with tf.name_scope(self.convbert.name):
|
| 902 |
+
self.convbert.build(None)
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
class TFConvBertMaskedLMHead(keras.layers.Layer):
|
| 906 |
+
def __init__(self, config, input_embeddings, **kwargs):
|
| 907 |
+
super().__init__(**kwargs)
|
| 908 |
+
|
| 909 |
+
self.config = config
|
| 910 |
+
self.embedding_size = config.embedding_size
|
| 911 |
+
self.input_embeddings = input_embeddings
|
| 912 |
+
|
| 913 |
+
def build(self, input_shape):
|
| 914 |
+
self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
|
| 915 |
+
|
| 916 |
+
super().build(input_shape)
|
| 917 |
+
|
| 918 |
+
def get_output_embeddings(self):
|
| 919 |
+
return self.input_embeddings
|
| 920 |
+
|
| 921 |
+
def set_output_embeddings(self, value):
|
| 922 |
+
self.input_embeddings.weight = value
|
| 923 |
+
self.input_embeddings.vocab_size = shape_list(value)[0]
|
| 924 |
+
|
| 925 |
+
def get_bias(self):
|
| 926 |
+
return {"bias": self.bias}
|
| 927 |
+
|
| 928 |
+
def set_bias(self, value):
|
| 929 |
+
self.bias = value["bias"]
|
| 930 |
+
self.config.vocab_size = shape_list(value["bias"])[0]
|
| 931 |
+
|
| 932 |
+
def call(self, hidden_states):
|
| 933 |
+
seq_length = shape_list(tensor=hidden_states)[1]
|
| 934 |
+
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
|
| 935 |
+
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
|
| 936 |
+
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
|
| 937 |
+
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
|
| 938 |
+
|
| 939 |
+
return hidden_states
|
| 940 |
+
|
| 941 |
+
|
| 942 |
+
class TFConvBertGeneratorPredictions(keras.layers.Layer):
|
| 943 |
+
def __init__(self, config, **kwargs):
|
| 944 |
+
super().__init__(**kwargs)
|
| 945 |
+
|
| 946 |
+
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
|
| 947 |
+
self.dense = keras.layers.Dense(config.embedding_size, name="dense")
|
| 948 |
+
self.config = config
|
| 949 |
+
|
| 950 |
+
def call(self, generator_hidden_states, training=False):
|
| 951 |
+
hidden_states = self.dense(generator_hidden_states)
|
| 952 |
+
hidden_states = get_tf_activation("gelu")(hidden_states)
|
| 953 |
+
hidden_states = self.LayerNorm(hidden_states)
|
| 954 |
+
|
| 955 |
+
return hidden_states
|
| 956 |
+
|
| 957 |
+
def build(self, input_shape=None):
|
| 958 |
+
if self.built:
|
| 959 |
+
return
|
| 960 |
+
self.built = True
|
| 961 |
+
if getattr(self, "LayerNorm", None) is not None:
|
| 962 |
+
with tf.name_scope(self.LayerNorm.name):
|
| 963 |
+
self.LayerNorm.build([None, None, self.config.embedding_size])
|
| 964 |
+
if getattr(self, "dense", None) is not None:
|
| 965 |
+
with tf.name_scope(self.dense.name):
|
| 966 |
+
self.dense.build([None, None, self.config.hidden_size])
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
|
| 970 |
+
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
|
| 971 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 972 |
+
super().__init__(config, **kwargs)
|
| 973 |
+
|
| 974 |
+
self.config = config
|
| 975 |
+
self.convbert = TFConvBertMainLayer(config, name="convbert")
|
| 976 |
+
self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
|
| 977 |
+
|
| 978 |
+
if isinstance(config.hidden_act, str):
|
| 979 |
+
self.activation = get_tf_activation(config.hidden_act)
|
| 980 |
+
else:
|
| 981 |
+
self.activation = config.hidden_act
|
| 982 |
+
|
| 983 |
+
self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
|
| 984 |
+
|
| 985 |
+
def get_lm_head(self):
|
| 986 |
+
return self.generator_lm_head
|
| 987 |
+
|
| 988 |
+
def get_prefix_bias_name(self):
|
| 989 |
+
return self.name + "/" + self.generator_lm_head.name
|
| 990 |
+
|
| 991 |
+
@unpack_inputs
|
| 992 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 993 |
+
@add_code_sample_docstrings(
|
| 994 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 995 |
+
output_type=TFMaskedLMOutput,
|
| 996 |
+
config_class=_CONFIG_FOR_DOC,
|
| 997 |
+
)
|
| 998 |
+
def call(
|
| 999 |
+
self,
|
| 1000 |
+
input_ids: TFModelInputType | None = None,
|
| 1001 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 1002 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 1003 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 1004 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 1005 |
+
inputs_embeds: tf.Tensor | None = None,
|
| 1006 |
+
output_attentions: Optional[bool] = None,
|
| 1007 |
+
output_hidden_states: Optional[bool] = None,
|
| 1008 |
+
return_dict: Optional[bool] = None,
|
| 1009 |
+
labels: tf.Tensor | None = None,
|
| 1010 |
+
training: Optional[bool] = False,
|
| 1011 |
+
) -> Union[Tuple, TFMaskedLMOutput]:
|
| 1012 |
+
r"""
|
| 1013 |
+
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1014 |
+
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
|
| 1015 |
+
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
|
| 1016 |
+
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
|
| 1017 |
+
"""
|
| 1018 |
+
generator_hidden_states = self.convbert(
|
| 1019 |
+
input_ids=input_ids,
|
| 1020 |
+
attention_mask=attention_mask,
|
| 1021 |
+
token_type_ids=token_type_ids,
|
| 1022 |
+
position_ids=position_ids,
|
| 1023 |
+
head_mask=head_mask,
|
| 1024 |
+
inputs_embeds=inputs_embeds,
|
| 1025 |
+
output_attentions=output_attentions,
|
| 1026 |
+
output_hidden_states=output_hidden_states,
|
| 1027 |
+
return_dict=return_dict,
|
| 1028 |
+
training=training,
|
| 1029 |
+
)
|
| 1030 |
+
generator_sequence_output = generator_hidden_states[0]
|
| 1031 |
+
prediction_scores = self.generator_predictions(generator_sequence_output, training=training)
|
| 1032 |
+
prediction_scores = self.generator_lm_head(prediction_scores, training=training)
|
| 1033 |
+
loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
|
| 1034 |
+
|
| 1035 |
+
if not return_dict:
|
| 1036 |
+
output = (prediction_scores,) + generator_hidden_states[1:]
|
| 1037 |
+
|
| 1038 |
+
return ((loss,) + output) if loss is not None else output
|
| 1039 |
+
|
| 1040 |
+
return TFMaskedLMOutput(
|
| 1041 |
+
loss=loss,
|
| 1042 |
+
logits=prediction_scores,
|
| 1043 |
+
hidden_states=generator_hidden_states.hidden_states,
|
| 1044 |
+
attentions=generator_hidden_states.attentions,
|
| 1045 |
+
)
|
| 1046 |
+
|
| 1047 |
+
def build(self, input_shape=None):
|
| 1048 |
+
if self.built:
|
| 1049 |
+
return
|
| 1050 |
+
self.built = True
|
| 1051 |
+
if getattr(self, "convbert", None) is not None:
|
| 1052 |
+
with tf.name_scope(self.convbert.name):
|
| 1053 |
+
self.convbert.build(None)
|
| 1054 |
+
if getattr(self, "generator_predictions", None) is not None:
|
| 1055 |
+
with tf.name_scope(self.generator_predictions.name):
|
| 1056 |
+
self.generator_predictions.build(None)
|
| 1057 |
+
if getattr(self, "generator_lm_head", None) is not None:
|
| 1058 |
+
with tf.name_scope(self.generator_lm_head.name):
|
| 1059 |
+
self.generator_lm_head.build(None)
|
| 1060 |
+
|
| 1061 |
+
|
| 1062 |
+
class TFConvBertClassificationHead(keras.layers.Layer):
|
| 1063 |
+
"""Head for sentence-level classification tasks."""
|
| 1064 |
+
|
| 1065 |
+
def __init__(self, config, **kwargs):
|
| 1066 |
+
super().__init__(**kwargs)
|
| 1067 |
+
|
| 1068 |
+
self.dense = keras.layers.Dense(
|
| 1069 |
+
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
|
| 1070 |
+
)
|
| 1071 |
+
classifier_dropout = (
|
| 1072 |
+
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
|
| 1073 |
+
)
|
| 1074 |
+
self.dropout = keras.layers.Dropout(classifier_dropout)
|
| 1075 |
+
self.out_proj = keras.layers.Dense(
|
| 1076 |
+
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
|
| 1077 |
+
)
|
| 1078 |
+
|
| 1079 |
+
self.config = config
|
| 1080 |
+
|
| 1081 |
+
def call(self, hidden_states, **kwargs):
|
| 1082 |
+
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
|
| 1083 |
+
x = self.dropout(x)
|
| 1084 |
+
x = self.dense(x)
|
| 1085 |
+
x = get_tf_activation(self.config.hidden_act)(x)
|
| 1086 |
+
x = self.dropout(x)
|
| 1087 |
+
x = self.out_proj(x)
|
| 1088 |
+
|
| 1089 |
+
return x
|
| 1090 |
+
|
| 1091 |
+
def build(self, input_shape=None):
|
| 1092 |
+
if self.built:
|
| 1093 |
+
return
|
| 1094 |
+
self.built = True
|
| 1095 |
+
if getattr(self, "dense", None) is not None:
|
| 1096 |
+
with tf.name_scope(self.dense.name):
|
| 1097 |
+
self.dense.build([None, None, self.config.hidden_size])
|
| 1098 |
+
if getattr(self, "out_proj", None) is not None:
|
| 1099 |
+
with tf.name_scope(self.out_proj.name):
|
| 1100 |
+
self.out_proj.build([None, None, self.config.hidden_size])
|
| 1101 |
+
|
| 1102 |
+
|
| 1103 |
+
@add_start_docstrings(
|
| 1104 |
+
"""
|
| 1105 |
+
ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
|
| 1106 |
+
""",
|
| 1107 |
+
CONVBERT_START_DOCSTRING,
|
| 1108 |
+
)
|
| 1109 |
+
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
|
| 1110 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 1111 |
+
super().__init__(config, *inputs, **kwargs)
|
| 1112 |
+
self.num_labels = config.num_labels
|
| 1113 |
+
self.convbert = TFConvBertMainLayer(config, name="convbert")
|
| 1114 |
+
self.classifier = TFConvBertClassificationHead(config, name="classifier")
|
| 1115 |
+
|
| 1116 |
+
@unpack_inputs
|
| 1117 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 1118 |
+
@add_code_sample_docstrings(
|
| 1119 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1120 |
+
output_type=TFSequenceClassifierOutput,
|
| 1121 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1122 |
+
)
|
| 1123 |
+
def call(
|
| 1124 |
+
self,
|
| 1125 |
+
input_ids: TFModelInputType | None = None,
|
| 1126 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 1127 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 1128 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 1129 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 1130 |
+
inputs_embeds: tf.Tensor | None = None,
|
| 1131 |
+
output_attentions: Optional[bool] = None,
|
| 1132 |
+
output_hidden_states: Optional[bool] = None,
|
| 1133 |
+
return_dict: Optional[bool] = None,
|
| 1134 |
+
labels: tf.Tensor | None = None,
|
| 1135 |
+
training: Optional[bool] = False,
|
| 1136 |
+
) -> Union[Tuple, TFSequenceClassifierOutput]:
|
| 1137 |
+
r"""
|
| 1138 |
+
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
|
| 1139 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 1140 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1141 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1142 |
+
"""
|
| 1143 |
+
outputs = self.convbert(
|
| 1144 |
+
input_ids,
|
| 1145 |
+
attention_mask=attention_mask,
|
| 1146 |
+
token_type_ids=token_type_ids,
|
| 1147 |
+
position_ids=position_ids,
|
| 1148 |
+
head_mask=head_mask,
|
| 1149 |
+
inputs_embeds=inputs_embeds,
|
| 1150 |
+
output_attentions=output_attentions,
|
| 1151 |
+
output_hidden_states=output_hidden_states,
|
| 1152 |
+
return_dict=return_dict,
|
| 1153 |
+
training=training,
|
| 1154 |
+
)
|
| 1155 |
+
logits = self.classifier(outputs[0], training=training)
|
| 1156 |
+
loss = None if labels is None else self.hf_compute_loss(labels, logits)
|
| 1157 |
+
|
| 1158 |
+
if not return_dict:
|
| 1159 |
+
output = (logits,) + outputs[1:]
|
| 1160 |
+
|
| 1161 |
+
return ((loss,) + output) if loss is not None else output
|
| 1162 |
+
|
| 1163 |
+
return TFSequenceClassifierOutput(
|
| 1164 |
+
loss=loss,
|
| 1165 |
+
logits=logits,
|
| 1166 |
+
hidden_states=outputs.hidden_states,
|
| 1167 |
+
attentions=outputs.attentions,
|
| 1168 |
+
)
|
| 1169 |
+
|
| 1170 |
+
def build(self, input_shape=None):
|
| 1171 |
+
if self.built:
|
| 1172 |
+
return
|
| 1173 |
+
self.built = True
|
| 1174 |
+
if getattr(self, "convbert", None) is not None:
|
| 1175 |
+
with tf.name_scope(self.convbert.name):
|
| 1176 |
+
self.convbert.build(None)
|
| 1177 |
+
if getattr(self, "classifier", None) is not None:
|
| 1178 |
+
with tf.name_scope(self.classifier.name):
|
| 1179 |
+
self.classifier.build(None)
|
| 1180 |
+
|
| 1181 |
+
|
| 1182 |
+
@add_start_docstrings(
|
| 1183 |
+
"""
|
| 1184 |
+
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
|
| 1185 |
+
softmax) e.g. for RocStories/SWAG tasks.
|
| 1186 |
+
""",
|
| 1187 |
+
CONVBERT_START_DOCSTRING,
|
| 1188 |
+
)
|
| 1189 |
+
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
|
| 1190 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 1191 |
+
super().__init__(config, *inputs, **kwargs)
|
| 1192 |
+
|
| 1193 |
+
self.convbert = TFConvBertMainLayer(config, name="convbert")
|
| 1194 |
+
self.sequence_summary = TFSequenceSummary(
|
| 1195 |
+
config, initializer_range=config.initializer_range, name="sequence_summary"
|
| 1196 |
+
)
|
| 1197 |
+
self.classifier = keras.layers.Dense(
|
| 1198 |
+
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
|
| 1199 |
+
)
|
| 1200 |
+
self.config = config
|
| 1201 |
+
|
| 1202 |
+
@unpack_inputs
|
| 1203 |
+
@add_start_docstrings_to_model_forward(
|
| 1204 |
+
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
|
| 1205 |
+
)
|
| 1206 |
+
@add_code_sample_docstrings(
|
| 1207 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1208 |
+
output_type=TFMultipleChoiceModelOutput,
|
| 1209 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1210 |
+
)
|
| 1211 |
+
def call(
|
| 1212 |
+
self,
|
| 1213 |
+
input_ids: TFModelInputType | None = None,
|
| 1214 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 1215 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 1216 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 1217 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 1218 |
+
inputs_embeds: tf.Tensor | None = None,
|
| 1219 |
+
output_attentions: Optional[bool] = None,
|
| 1220 |
+
output_hidden_states: Optional[bool] = None,
|
| 1221 |
+
return_dict: Optional[bool] = None,
|
| 1222 |
+
labels: tf.Tensor | None = None,
|
| 1223 |
+
training: Optional[bool] = False,
|
| 1224 |
+
) -> Union[Tuple, TFMultipleChoiceModelOutput]:
|
| 1225 |
+
r"""
|
| 1226 |
+
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
|
| 1227 |
+
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
|
| 1228 |
+
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
|
| 1229 |
+
"""
|
| 1230 |
+
if input_ids is not None:
|
| 1231 |
+
num_choices = shape_list(input_ids)[1]
|
| 1232 |
+
seq_length = shape_list(input_ids)[2]
|
| 1233 |
+
else:
|
| 1234 |
+
num_choices = shape_list(inputs_embeds)[1]
|
| 1235 |
+
seq_length = shape_list(inputs_embeds)[2]
|
| 1236 |
+
|
| 1237 |
+
flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
|
| 1238 |
+
flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
|
| 1239 |
+
flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
|
| 1240 |
+
flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
|
| 1241 |
+
flat_inputs_embeds = (
|
| 1242 |
+
tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
|
| 1243 |
+
if inputs_embeds is not None
|
| 1244 |
+
else None
|
| 1245 |
+
)
|
| 1246 |
+
outputs = self.convbert(
|
| 1247 |
+
flat_input_ids,
|
| 1248 |
+
flat_attention_mask,
|
| 1249 |
+
flat_token_type_ids,
|
| 1250 |
+
flat_position_ids,
|
| 1251 |
+
head_mask,
|
| 1252 |
+
flat_inputs_embeds,
|
| 1253 |
+
output_attentions,
|
| 1254 |
+
output_hidden_states,
|
| 1255 |
+
return_dict=return_dict,
|
| 1256 |
+
training=training,
|
| 1257 |
+
)
|
| 1258 |
+
logits = self.sequence_summary(outputs[0], training=training)
|
| 1259 |
+
logits = self.classifier(logits)
|
| 1260 |
+
reshaped_logits = tf.reshape(logits, (-1, num_choices))
|
| 1261 |
+
loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
|
| 1262 |
+
|
| 1263 |
+
if not return_dict:
|
| 1264 |
+
output = (reshaped_logits,) + outputs[1:]
|
| 1265 |
+
|
| 1266 |
+
return ((loss,) + output) if loss is not None else output
|
| 1267 |
+
|
| 1268 |
+
return TFMultipleChoiceModelOutput(
|
| 1269 |
+
loss=loss,
|
| 1270 |
+
logits=reshaped_logits,
|
| 1271 |
+
hidden_states=outputs.hidden_states,
|
| 1272 |
+
attentions=outputs.attentions,
|
| 1273 |
+
)
|
| 1274 |
+
|
| 1275 |
+
def build(self, input_shape=None):
|
| 1276 |
+
if self.built:
|
| 1277 |
+
return
|
| 1278 |
+
self.built = True
|
| 1279 |
+
if getattr(self, "convbert", None) is not None:
|
| 1280 |
+
with tf.name_scope(self.convbert.name):
|
| 1281 |
+
self.convbert.build(None)
|
| 1282 |
+
if getattr(self, "sequence_summary", None) is not None:
|
| 1283 |
+
with tf.name_scope(self.sequence_summary.name):
|
| 1284 |
+
self.sequence_summary.build(None)
|
| 1285 |
+
if getattr(self, "classifier", None) is not None:
|
| 1286 |
+
with tf.name_scope(self.classifier.name):
|
| 1287 |
+
self.classifier.build([None, None, self.config.hidden_size])
|
| 1288 |
+
|
| 1289 |
+
|
| 1290 |
+
@add_start_docstrings(
|
| 1291 |
+
"""
|
| 1292 |
+
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
|
| 1293 |
+
Named-Entity-Recognition (NER) tasks.
|
| 1294 |
+
""",
|
| 1295 |
+
CONVBERT_START_DOCSTRING,
|
| 1296 |
+
)
|
| 1297 |
+
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
|
| 1298 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 1299 |
+
super().__init__(config, *inputs, **kwargs)
|
| 1300 |
+
|
| 1301 |
+
self.num_labels = config.num_labels
|
| 1302 |
+
self.convbert = TFConvBertMainLayer(config, name="convbert")
|
| 1303 |
+
classifier_dropout = (
|
| 1304 |
+
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
|
| 1305 |
+
)
|
| 1306 |
+
self.dropout = keras.layers.Dropout(classifier_dropout)
|
| 1307 |
+
self.classifier = keras.layers.Dense(
|
| 1308 |
+
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
|
| 1309 |
+
)
|
| 1310 |
+
self.config = config
|
| 1311 |
+
|
| 1312 |
+
@unpack_inputs
|
| 1313 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 1314 |
+
@add_code_sample_docstrings(
|
| 1315 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1316 |
+
output_type=TFTokenClassifierOutput,
|
| 1317 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1318 |
+
)
|
| 1319 |
+
def call(
|
| 1320 |
+
self,
|
| 1321 |
+
input_ids: TFModelInputType | None = None,
|
| 1322 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 1323 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 1324 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 1325 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 1326 |
+
inputs_embeds: tf.Tensor | None = None,
|
| 1327 |
+
output_attentions: Optional[bool] = None,
|
| 1328 |
+
output_hidden_states: Optional[bool] = None,
|
| 1329 |
+
return_dict: Optional[bool] = None,
|
| 1330 |
+
labels: tf.Tensor | None = None,
|
| 1331 |
+
training: Optional[bool] = False,
|
| 1332 |
+
) -> Union[Tuple, TFTokenClassifierOutput]:
|
| 1333 |
+
r"""
|
| 1334 |
+
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1335 |
+
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
|
| 1336 |
+
"""
|
| 1337 |
+
outputs = self.convbert(
|
| 1338 |
+
input_ids,
|
| 1339 |
+
attention_mask=attention_mask,
|
| 1340 |
+
token_type_ids=token_type_ids,
|
| 1341 |
+
position_ids=position_ids,
|
| 1342 |
+
head_mask=head_mask,
|
| 1343 |
+
inputs_embeds=inputs_embeds,
|
| 1344 |
+
output_attentions=output_attentions,
|
| 1345 |
+
output_hidden_states=output_hidden_states,
|
| 1346 |
+
return_dict=return_dict,
|
| 1347 |
+
training=training,
|
| 1348 |
+
)
|
| 1349 |
+
sequence_output = outputs[0]
|
| 1350 |
+
sequence_output = self.dropout(sequence_output, training=training)
|
| 1351 |
+
logits = self.classifier(sequence_output)
|
| 1352 |
+
loss = None if labels is None else self.hf_compute_loss(labels, logits)
|
| 1353 |
+
|
| 1354 |
+
if not return_dict:
|
| 1355 |
+
output = (logits,) + outputs[1:]
|
| 1356 |
+
return ((loss,) + output) if loss is not None else output
|
| 1357 |
+
|
| 1358 |
+
return TFTokenClassifierOutput(
|
| 1359 |
+
loss=loss,
|
| 1360 |
+
logits=logits,
|
| 1361 |
+
hidden_states=outputs.hidden_states,
|
| 1362 |
+
attentions=outputs.attentions,
|
| 1363 |
+
)
|
| 1364 |
+
|
| 1365 |
+
def build(self, input_shape=None):
|
| 1366 |
+
if self.built:
|
| 1367 |
+
return
|
| 1368 |
+
self.built = True
|
| 1369 |
+
if getattr(self, "convbert", None) is not None:
|
| 1370 |
+
with tf.name_scope(self.convbert.name):
|
| 1371 |
+
self.convbert.build(None)
|
| 1372 |
+
if getattr(self, "classifier", None) is not None:
|
| 1373 |
+
with tf.name_scope(self.classifier.name):
|
| 1374 |
+
self.classifier.build([None, None, self.config.hidden_size])
|
| 1375 |
+
|
| 1376 |
+
|
| 1377 |
+
@add_start_docstrings(
|
| 1378 |
+
"""
|
| 1379 |
+
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
|
| 1380 |
+
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
|
| 1381 |
+
""",
|
| 1382 |
+
CONVBERT_START_DOCSTRING,
|
| 1383 |
+
)
|
| 1384 |
+
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
|
| 1385 |
+
def __init__(self, config, *inputs, **kwargs):
|
| 1386 |
+
super().__init__(config, *inputs, **kwargs)
|
| 1387 |
+
|
| 1388 |
+
self.num_labels = config.num_labels
|
| 1389 |
+
self.convbert = TFConvBertMainLayer(config, name="convbert")
|
| 1390 |
+
self.qa_outputs = keras.layers.Dense(
|
| 1391 |
+
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
|
| 1392 |
+
)
|
| 1393 |
+
self.config = config
|
| 1394 |
+
|
| 1395 |
+
@unpack_inputs
|
| 1396 |
+
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
| 1397 |
+
@add_code_sample_docstrings(
|
| 1398 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1399 |
+
output_type=TFQuestionAnsweringModelOutput,
|
| 1400 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1401 |
+
)
|
| 1402 |
+
def call(
|
| 1403 |
+
self,
|
| 1404 |
+
input_ids: TFModelInputType | None = None,
|
| 1405 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
| 1406 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
| 1407 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
| 1408 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
| 1409 |
+
inputs_embeds: tf.Tensor | None = None,
|
| 1410 |
+
output_attentions: Optional[bool] = None,
|
| 1411 |
+
output_hidden_states: Optional[bool] = None,
|
| 1412 |
+
return_dict: Optional[bool] = None,
|
| 1413 |
+
start_positions: tf.Tensor | None = None,
|
| 1414 |
+
end_positions: tf.Tensor | None = None,
|
| 1415 |
+
training: Optional[bool] = False,
|
| 1416 |
+
) -> Union[Tuple, TFQuestionAnsweringModelOutput]:
|
| 1417 |
+
r"""
|
| 1418 |
+
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
|
| 1419 |
+
Labels for position (index) of the start of the labelled span for computing the token classification loss.
|
| 1420 |
+
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
|
| 1421 |
+
are not taken into account for computing the loss.
|
| 1422 |
+
end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
|
| 1423 |
+
Labels for position (index) of the end of the labelled span for computing the token classification loss.
|
| 1424 |
+
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
|
| 1425 |
+
are not taken into account for computing the loss.
|
| 1426 |
+
"""
|
| 1427 |
+
outputs = self.convbert(
|
| 1428 |
+
input_ids,
|
| 1429 |
+
attention_mask=attention_mask,
|
| 1430 |
+
token_type_ids=token_type_ids,
|
| 1431 |
+
position_ids=position_ids,
|
| 1432 |
+
head_mask=head_mask,
|
| 1433 |
+
inputs_embeds=inputs_embeds,
|
| 1434 |
+
output_attentions=output_attentions,
|
| 1435 |
+
output_hidden_states=output_hidden_states,
|
| 1436 |
+
return_dict=return_dict,
|
| 1437 |
+
training=training,
|
| 1438 |
+
)
|
| 1439 |
+
sequence_output = outputs[0]
|
| 1440 |
+
logits = self.qa_outputs(sequence_output)
|
| 1441 |
+
start_logits, end_logits = tf.split(logits, 2, axis=-1)
|
| 1442 |
+
start_logits = tf.squeeze(start_logits, axis=-1)
|
| 1443 |
+
end_logits = tf.squeeze(end_logits, axis=-1)
|
| 1444 |
+
loss = None
|
| 1445 |
+
|
| 1446 |
+
if start_positions is not None and end_positions is not None:
|
| 1447 |
+
labels = {"start_position": start_positions}
|
| 1448 |
+
labels["end_position"] = end_positions
|
| 1449 |
+
loss = self.hf_compute_loss(labels, (start_logits, end_logits))
|
| 1450 |
+
|
| 1451 |
+
if not return_dict:
|
| 1452 |
+
output = (start_logits, end_logits) + outputs[1:]
|
| 1453 |
+
return ((loss,) + output) if loss is not None else output
|
| 1454 |
+
|
| 1455 |
+
return TFQuestionAnsweringModelOutput(
|
| 1456 |
+
loss=loss,
|
| 1457 |
+
start_logits=start_logits,
|
| 1458 |
+
end_logits=end_logits,
|
| 1459 |
+
hidden_states=outputs.hidden_states,
|
| 1460 |
+
attentions=outputs.attentions,
|
| 1461 |
+
)
|
| 1462 |
+
|
| 1463 |
+
def build(self, input_shape=None):
|
| 1464 |
+
if self.built:
|
| 1465 |
+
return
|
| 1466 |
+
self.built = True
|
| 1467 |
+
if getattr(self, "convbert", None) is not None:
|
| 1468 |
+
with tf.name_scope(self.convbert.name):
|
| 1469 |
+
self.convbert.build(None)
|
| 1470 |
+
if getattr(self, "qa_outputs", None) is not None:
|
| 1471 |
+
with tf.name_scope(self.qa_outputs.name):
|
| 1472 |
+
self.qa_outputs.build([None, None, self.config.hidden_size])
|
deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py
ADDED
|
@@ -0,0 +1,529 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Tokenization classes for ConvBERT."""
|
| 16 |
+
import collections
|
| 17 |
+
import os
|
| 18 |
+
import unicodedata
|
| 19 |
+
from typing import List, Optional, Tuple
|
| 20 |
+
|
| 21 |
+
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
|
| 22 |
+
from ...utils import logging
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
logger = logging.get_logger(__name__)
|
| 26 |
+
|
| 27 |
+
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
|
| 28 |
+
|
| 29 |
+
PRETRAINED_VOCAB_FILES_MAP = {
|
| 30 |
+
"vocab_file": {
|
| 31 |
+
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
|
| 32 |
+
"YituTech/conv-bert-medium-small": (
|
| 33 |
+
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
|
| 34 |
+
),
|
| 35 |
+
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
|
| 36 |
+
}
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
|
| 40 |
+
"YituTech/conv-bert-base": 512,
|
| 41 |
+
"YituTech/conv-bert-medium-small": 512,
|
| 42 |
+
"YituTech/conv-bert-small": 512,
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
PRETRAINED_INIT_CONFIGURATION = {
|
| 47 |
+
"YituTech/conv-bert-base": {"do_lower_case": True},
|
| 48 |
+
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
|
| 49 |
+
"YituTech/conv-bert-small": {"do_lower_case": True},
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# Copied from transformers.models.bert.tokenization_bert.load_vocab
|
| 54 |
+
def load_vocab(vocab_file):
|
| 55 |
+
"""Loads a vocabulary file into a dictionary."""
|
| 56 |
+
vocab = collections.OrderedDict()
|
| 57 |
+
with open(vocab_file, "r", encoding="utf-8") as reader:
|
| 58 |
+
tokens = reader.readlines()
|
| 59 |
+
for index, token in enumerate(tokens):
|
| 60 |
+
token = token.rstrip("\n")
|
| 61 |
+
vocab[token] = index
|
| 62 |
+
return vocab
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
|
| 66 |
+
def whitespace_tokenize(text):
|
| 67 |
+
"""Runs basic whitespace cleaning and splitting on a piece of text."""
|
| 68 |
+
text = text.strip()
|
| 69 |
+
if not text:
|
| 70 |
+
return []
|
| 71 |
+
tokens = text.split()
|
| 72 |
+
return tokens
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->YituTech/conv-bert-base, ConvBertTokenizer->BertTokenizer, BERT->ConvBERT
|
| 76 |
+
class ConvBertTokenizer(PreTrainedTokenizer):
|
| 77 |
+
r"""
|
| 78 |
+
Construct a ConvBERT tokenizer. Based on WordPiece.
|
| 79 |
+
|
| 80 |
+
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
|
| 81 |
+
this superclass for more information regarding those methods.
|
| 82 |
+
|
| 83 |
+
Args:
|
| 84 |
+
vocab_file (`str`):
|
| 85 |
+
File containing the vocabulary.
|
| 86 |
+
do_lower_case (`bool`, *optional*, defaults to `True`):
|
| 87 |
+
Whether or not to lowercase the input when tokenizing.
|
| 88 |
+
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
|
| 89 |
+
Whether or not to do basic tokenization before WordPiece.
|
| 90 |
+
never_split (`Iterable`, *optional*):
|
| 91 |
+
Collection of tokens which will never be split during tokenization. Only has an effect when
|
| 92 |
+
`do_basic_tokenize=True`
|
| 93 |
+
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
|
| 94 |
+
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
| 95 |
+
token instead.
|
| 96 |
+
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
|
| 97 |
+
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
|
| 98 |
+
sequence classification or for a text and a question for question answering. It is also used as the last
|
| 99 |
+
token of a sequence built with special tokens.
|
| 100 |
+
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
|
| 101 |
+
The token used for padding, for example when batching sequences of different lengths.
|
| 102 |
+
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
|
| 103 |
+
The classifier token which is used when doing sequence classification (classification of the whole sequence
|
| 104 |
+
instead of per-token classification). It is the first token of the sequence when built with special tokens.
|
| 105 |
+
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
|
| 106 |
+
The token used for masking values. This is the token used when training this model with masked language
|
| 107 |
+
modeling. This is the token which the model will try to predict.
|
| 108 |
+
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
|
| 109 |
+
Whether or not to tokenize Chinese characters.
|
| 110 |
+
|
| 111 |
+
This should likely be deactivated for Japanese (see this
|
| 112 |
+
[issue](https://github.com/huggingface/transformers/issues/328)).
|
| 113 |
+
strip_accents (`bool`, *optional*):
|
| 114 |
+
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
|
| 115 |
+
value for `lowercase` (as in the original ConvBERT).
|
| 116 |
+
"""
|
| 117 |
+
|
| 118 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
| 119 |
+
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
|
| 120 |
+
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
|
| 121 |
+
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
|
| 122 |
+
|
| 123 |
+
def __init__(
|
| 124 |
+
self,
|
| 125 |
+
vocab_file,
|
| 126 |
+
do_lower_case=True,
|
| 127 |
+
do_basic_tokenize=True,
|
| 128 |
+
never_split=None,
|
| 129 |
+
unk_token="[UNK]",
|
| 130 |
+
sep_token="[SEP]",
|
| 131 |
+
pad_token="[PAD]",
|
| 132 |
+
cls_token="[CLS]",
|
| 133 |
+
mask_token="[MASK]",
|
| 134 |
+
tokenize_chinese_chars=True,
|
| 135 |
+
strip_accents=None,
|
| 136 |
+
**kwargs,
|
| 137 |
+
):
|
| 138 |
+
if not os.path.isfile(vocab_file):
|
| 139 |
+
raise ValueError(
|
| 140 |
+
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
|
| 141 |
+
" model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
|
| 142 |
+
)
|
| 143 |
+
self.vocab = load_vocab(vocab_file)
|
| 144 |
+
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
|
| 145 |
+
self.do_basic_tokenize = do_basic_tokenize
|
| 146 |
+
if do_basic_tokenize:
|
| 147 |
+
self.basic_tokenizer = BasicTokenizer(
|
| 148 |
+
do_lower_case=do_lower_case,
|
| 149 |
+
never_split=never_split,
|
| 150 |
+
tokenize_chinese_chars=tokenize_chinese_chars,
|
| 151 |
+
strip_accents=strip_accents,
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
|
| 155 |
+
|
| 156 |
+
super().__init__(
|
| 157 |
+
do_lower_case=do_lower_case,
|
| 158 |
+
do_basic_tokenize=do_basic_tokenize,
|
| 159 |
+
never_split=never_split,
|
| 160 |
+
unk_token=unk_token,
|
| 161 |
+
sep_token=sep_token,
|
| 162 |
+
pad_token=pad_token,
|
| 163 |
+
cls_token=cls_token,
|
| 164 |
+
mask_token=mask_token,
|
| 165 |
+
tokenize_chinese_chars=tokenize_chinese_chars,
|
| 166 |
+
strip_accents=strip_accents,
|
| 167 |
+
**kwargs,
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
@property
|
| 171 |
+
def do_lower_case(self):
|
| 172 |
+
return self.basic_tokenizer.do_lower_case
|
| 173 |
+
|
| 174 |
+
@property
|
| 175 |
+
def vocab_size(self):
|
| 176 |
+
return len(self.vocab)
|
| 177 |
+
|
| 178 |
+
def get_vocab(self):
|
| 179 |
+
return dict(self.vocab, **self.added_tokens_encoder)
|
| 180 |
+
|
| 181 |
+
def _tokenize(self, text, split_special_tokens=False):
|
| 182 |
+
split_tokens = []
|
| 183 |
+
if self.do_basic_tokenize:
|
| 184 |
+
for token in self.basic_tokenizer.tokenize(
|
| 185 |
+
text, never_split=self.all_special_tokens if not split_special_tokens else None
|
| 186 |
+
):
|
| 187 |
+
# If the token is part of the never_split set
|
| 188 |
+
if token in self.basic_tokenizer.never_split:
|
| 189 |
+
split_tokens.append(token)
|
| 190 |
+
else:
|
| 191 |
+
split_tokens += self.wordpiece_tokenizer.tokenize(token)
|
| 192 |
+
else:
|
| 193 |
+
split_tokens = self.wordpiece_tokenizer.tokenize(text)
|
| 194 |
+
return split_tokens
|
| 195 |
+
|
| 196 |
+
def _convert_token_to_id(self, token):
|
| 197 |
+
"""Converts a token (str) in an id using the vocab."""
|
| 198 |
+
return self.vocab.get(token, self.vocab.get(self.unk_token))
|
| 199 |
+
|
| 200 |
+
def _convert_id_to_token(self, index):
|
| 201 |
+
"""Converts an index (integer) in a token (str) using the vocab."""
|
| 202 |
+
return self.ids_to_tokens.get(index, self.unk_token)
|
| 203 |
+
|
| 204 |
+
def convert_tokens_to_string(self, tokens):
|
| 205 |
+
"""Converts a sequence of tokens (string) in a single string."""
|
| 206 |
+
out_string = " ".join(tokens).replace(" ##", "").strip()
|
| 207 |
+
return out_string
|
| 208 |
+
|
| 209 |
+
def build_inputs_with_special_tokens(
|
| 210 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
| 211 |
+
) -> List[int]:
|
| 212 |
+
"""
|
| 213 |
+
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
| 214 |
+
        adding special tokens. A ConvBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)


# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
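As a quick illustration of the special-token layout and the greedy longest-match WordPiece split documented in the docstrings above, here is a small, self-contained sketch. It is editor-added and not part of the library file; the toy vocabulary and the placeholder IDs 101/102 are invented for the example.

from transformers.models.convbert.tokenization_convbert import WordpieceTokenizer

# Made-up vocabulary just for illustration.
vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3}
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(wordpiece.tokenize("unaffable"))     # ['un', '##aff', '##able']
print(wordpiece.tokenize("unbelievable"))  # ['[UNK]'] -- no greedy cover exists in this toy vocab

# Sequence-pair layout mirroring build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences, with invented token IDs:
cls_id, sep_id = 101, 102                  # placeholder IDs, not from a real vocab file
a, b = [7, 8, 9], [11, 12]
input_ids = [cls_id] + a + [sep_id] + b + [sep_id]         # [CLS] A [SEP] B [SEP]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)   # 0s for segment A, 1s for segment B
print(input_ids)       # [101, 7, 8, 9, 102, 11, 12, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]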
deepseekvl2/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert_fast.py
ADDED
@@ -0,0 +1,198 @@
# coding=utf-8
# Copyright The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for ConvBERT."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}


PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->YituTech/conv-bert-base, Bert->ConvBert, BERT->ConvBERT
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        clean_text (`bool`, *optional*, defaults to `True`):
            Whether or not to clean the text before tokenization by removing any control characters and replacing all
            whitespaces by the classic one.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
            issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original ConvBERT).
        wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
            The prefix for subwords.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A ConvBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
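For reference, a minimal usage sketch of the fast tokenizer defined above. This is editor-added, not part of the file, and assumes network access to fetch the YituTech/conv-bert-base checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP.

from transformers import ConvBertTokenizerFast

# Downloads the vocabulary/tokenizer files for the checkpoint named above.
tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")

# Encoding a sentence pair applies the [CLS] A [SEP] B [SEP] layout and the 0/1
# token_type_ids mask documented in create_token_type_ids_from_sequences.
enc = tokenizer("how are you?", "i am fine.")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])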
deepseekvl2/lib/python3.10/site-packages/transformers/models/dialogpt/__init__.py
ADDED
File without changes

deepseekvl2/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (185 Bytes).

deepseekvl2/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
ADDED
Binary file (1.09 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py
ADDED
@@ -0,0 +1,46 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
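The conversion script above does one thing: it renames the tied LM-head key in a pickled DialoGPT state dict. A hedged, self-contained illustration of that rename (editor-added; it uses an invented in-memory state dict rather than a real *_ft.pkl checkpoint):

import torch

# Stand-in for a DialoGPT fine-tuned checkpoint; real checkpoints hold full GPT-2 weights.
state_dict = {
    "transformer.wte.weight": torch.zeros(4, 2),
    "lm_head.decoder.weight": torch.zeros(4, 2),
}

# Same operation as convert_dialogpt_checkpoint: move the LM head weight to the
# key name that the transformers GPT-2 model expects before saving.
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
print(sorted(state_dict))  # ['lm_head.weight', 'transformer.wte.weight']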
deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__init__.py
ADDED
@@ -0,0 +1,168 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
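The _LazyModule indirection above means the heavy framework-specific submodules are only imported when an attribute is first accessed, and only if the optional dependency is installed. A short editor-added usage sketch (assumes a PyTorch install; the attribute access is what triggers the real import):

import transformers

# Nothing under transformers.models.electra is imported at this point; the lazy
# module resolves the attribute on first access and imports modeling_electra then.
model_cls = transformers.ElectraForSequenceClassification
print(model_cls.__module__)  # transformers.models.electra.modeling_electra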
deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.53 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/configuration_electra.cpython-310.pyc
ADDED
Binary file (8.92 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/convert_electra_original_tf_checkpoint_to_pytorch.cpython-310.pyc
ADDED
Binary file (1.85 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_electra.cpython-310.pyc
ADDED
Binary file (49.1 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_flax_electra.cpython-310.pyc
ADDED
Binary file (40.6 kB).

deepseekvl2/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_tf_electra.cpython-310.pyc
ADDED
Binary file (51.7 kB).