ZTWHHH commited on
Commit
bd68c14
·
verified ·
1 Parent(s): 99a6fc5

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. llava_next/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 +3 -0
  3. llava_next/lib/python3.10/site-packages/nvidia/cufft/include/cufftXt.h +268 -0
  4. llava_next/lib/python3.10/site-packages/nvidia/cufft/include/cufftw.h +454 -0
  5. llava_next/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 +3 -0
  6. parrot/lib/python3.10/site-packages/pyarrow/libarrow_python.so +3 -0
  7. parrot/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py +374 -0
  11. parrot/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py +948 -0
  12. parrot/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py +29 -0
  13. parrot/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py +767 -0
  15. parrot/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py +157 -0
  18. parrot/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py +57 -0
  19. parrot/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py +503 -0
  20. parrot/lib/python3.10/site-packages/transformers/models/dinat/__init__.py +54 -0
  21. parrot/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py +91 -0
  22. parrot/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/convert_groupvit_nvlab_to_hf.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_tf_groupvit.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py +449 -0
  26. parrot/lib/python3.10/site-packages/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py +217 -0
  27. parrot/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py +1582 -0
  28. parrot/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py +2139 -0
  29. parrot/lib/python3.10/site-packages/transformers/models/idefics2/__init__.py +72 -0
  30. parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/__init__.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/configuration_idefics2.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/convert_idefics2_weights_to_hf.cpython-310.pyc +0 -0
  33. parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/image_processing_idefics2.cpython-310.pyc +0 -0
  34. parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/modeling_idefics2.cpython-310.pyc +0 -0
  35. parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/processing_idefics2.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/transformers/models/idefics2/configuration_idefics2.py +262 -0
  37. parrot/lib/python3.10/site-packages/transformers/models/idefics2/convert_idefics2_weights_to_hf.py +185 -0
  38. parrot/lib/python3.10/site-packages/transformers/models/idefics2/image_processing_idefics2.py +596 -0
  39. parrot/lib/python3.10/site-packages/transformers/models/idefics2/modeling_idefics2.py +1962 -0
  40. parrot/lib/python3.10/site-packages/transformers/models/idefics2/processing_idefics2.py +354 -0
  41. parrot/lib/python3.10/site-packages/transformers/models/mpnet/configuration_mpnet.py +113 -0
  42. parrot/lib/python3.10/site-packages/transformers/models/nllb/__init__.py +64 -0
  43. parrot/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/__init__.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb.py +433 -0
  46. parrot/lib/python3.10/site-packages/transformers/models/patchtst/__init__.py +61 -0
  47. parrot/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/__init__.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/configuration_patchtst.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/modeling_patchtst.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/transformers/models/patchtst/configuration_patchtst.py +257 -0
.gitattributes CHANGED
@@ -315,3 +315,6 @@ pllava/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.
315
  pllava/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
316
  llava_next/lib/python3.10/site-packages/fontTools/__pycache__/agl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
317
  llava_next/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/builder.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
315
  pllava/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
316
  llava_next/lib/python3.10/site-packages/fontTools/__pycache__/agl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
317
  llava_next/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/builder.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
318
+ llava_next/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text
319
+ llava_next/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
320
+ parrot/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text
llava_next/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f85e945ef531cc36603989041594f3bb71a1ae22abd7b258bde0c4813aebc561
3
+ size 737048
llava_next/lib/python3.10/site-packages/nvidia/cufft/include/cufftXt.h ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2005-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*!
51
+ * \file cufftXt.h
52
+ * \brief Public header file for the NVIDIA CUDA FFT library (CUFFT)
53
+ */
54
+
55
+ #ifndef _CUFFTXT_H_
56
+ #define _CUFFTXT_H_
57
+ #include "cudalibxt.h"
58
+ #include "cufft.h"
59
+
60
+
61
+ #ifndef CUFFTAPI
62
+ #ifdef _WIN32
63
+ #define CUFFTAPI __stdcall
64
+ #else
65
+ #define CUFFTAPI
66
+ #endif
67
+ #endif
68
+
69
+ #ifdef __cplusplus
70
+ extern "C" {
71
+ #endif
72
+
73
+ //
74
+ // cufftXtSubFormat identifies the data layout of
75
+ // a memory descriptor owned by cufft.
76
+ // note that multi GPU cufft does not yet support out-of-place transforms
77
+ //
78
+
79
+ typedef enum cufftXtSubFormat_t {
80
+ CUFFT_XT_FORMAT_INPUT = 0x00, //by default input is in linear order across GPUs
81
+ CUFFT_XT_FORMAT_OUTPUT = 0x01, //by default output is in scrambled order depending on transform
82
+ CUFFT_XT_FORMAT_INPLACE = 0x02, //by default inplace is input order, which is linear across GPUs
83
+ CUFFT_XT_FORMAT_INPLACE_SHUFFLED = 0x03, //shuffled output order after execution of the transform
84
+ CUFFT_XT_FORMAT_1D_INPUT_SHUFFLED = 0x04, //shuffled input order prior to execution of 1D transforms
85
+ CUFFT_XT_FORMAT_DISTRIBUTED_INPUT = 0x05,
86
+ CUFFT_XT_FORMAT_DISTRIBUTED_OUTPUT = 0x06,
87
+ CUFFT_FORMAT_UNDEFINED = 0x07
88
+ } cufftXtSubFormat;
89
+
90
+ //
91
+ // cufftXtCopyType specifies the type of copy for cufftXtMemcpy
92
+ //
93
+ typedef enum cufftXtCopyType_t {
94
+ CUFFT_COPY_HOST_TO_DEVICE = 0x00,
95
+ CUFFT_COPY_DEVICE_TO_HOST = 0x01,
96
+ CUFFT_COPY_DEVICE_TO_DEVICE = 0x02,
97
+ CUFFT_COPY_UNDEFINED = 0x03
98
+ } cufftXtCopyType;
99
+
100
+ //
101
+ // cufftXtQueryType specifies the type of query for cufftXtQueryPlan
102
+ //
103
+ typedef enum cufftXtQueryType_t {
104
+ CUFFT_QUERY_1D_FACTORS = 0x00,
105
+ CUFFT_QUERY_UNDEFINED = 0x01
106
+ } cufftXtQueryType;
107
+
108
+ typedef struct cufftXt1dFactors_t {
109
+ long long int size;
110
+ long long int stringCount;
111
+ long long int stringLength;
112
+ long long int substringLength;
113
+ long long int factor1;
114
+ long long int factor2;
115
+ long long int stringMask;
116
+ long long int substringMask;
117
+ long long int factor1Mask;
118
+ long long int factor2Mask;
119
+ int stringShift;
120
+ int substringShift;
121
+ int factor1Shift;
122
+ int factor2Shift;
123
+ } cufftXt1dFactors;
124
+
125
+ //
126
+ // cufftXtWorkAreaPolicy specifies policy for cufftXtSetWorkAreaPolicy
127
+ //
128
+ typedef enum cufftXtWorkAreaPolicy_t {
129
+ CUFFT_WORKAREA_MINIMAL = 0, /* maximum reduction */
130
+ CUFFT_WORKAREA_USER = 1, /* use workSize parameter as limit */
131
+ CUFFT_WORKAREA_PERFORMANCE = 2, /* default - 1x overhead or more, maximum performance */
132
+ } cufftXtWorkAreaPolicy;
133
+
134
+ // multi-GPU routines
135
+ cufftResult CUFFTAPI cufftXtSetGPUs(cufftHandle handle, int nGPUs, int *whichGPUs);
136
+
137
+ cufftResult CUFFTAPI cufftXtMalloc(cufftHandle plan,
138
+ cudaLibXtDesc ** descriptor,
139
+ cufftXtSubFormat format);
140
+
141
+ cufftResult CUFFTAPI cufftXtMemcpy(cufftHandle plan,
142
+ void *dstPointer,
143
+ void *srcPointer,
144
+ cufftXtCopyType type);
145
+
146
+ cufftResult CUFFTAPI cufftXtFree(cudaLibXtDesc *descriptor);
147
+
148
+ cufftResult CUFFTAPI cufftXtSetWorkArea(cufftHandle plan, void **workArea);
149
+
150
+ cufftResult CUFFTAPI cufftXtExecDescriptorC2C(cufftHandle plan,
151
+ cudaLibXtDesc *input,
152
+ cudaLibXtDesc *output,
153
+ int direction);
154
+
155
+ cufftResult CUFFTAPI cufftXtExecDescriptorR2C(cufftHandle plan,
156
+ cudaLibXtDesc *input,
157
+ cudaLibXtDesc *output);
158
+
159
+ cufftResult CUFFTAPI cufftXtExecDescriptorC2R(cufftHandle plan,
160
+ cudaLibXtDesc *input,
161
+ cudaLibXtDesc *output);
162
+
163
+ cufftResult CUFFTAPI cufftXtExecDescriptorZ2Z(cufftHandle plan,
164
+ cudaLibXtDesc *input,
165
+ cudaLibXtDesc *output,
166
+ int direction);
167
+
168
+ cufftResult CUFFTAPI cufftXtExecDescriptorD2Z(cufftHandle plan,
169
+ cudaLibXtDesc *input,
170
+ cudaLibXtDesc *output);
171
+
172
+ cufftResult CUFFTAPI cufftXtExecDescriptorZ2D(cufftHandle plan,
173
+ cudaLibXtDesc *input,
174
+ cudaLibXtDesc *output);
175
+
176
+ // Utility functions
177
+
178
+ cufftResult CUFFTAPI cufftXtQueryPlan(cufftHandle plan, void *queryStruct, cufftXtQueryType queryType);
179
+
180
+
181
+ // callbacks
182
+
183
+
184
+ typedef enum cufftXtCallbackType_t {
185
+ CUFFT_CB_LD_COMPLEX = 0x0,
186
+ CUFFT_CB_LD_COMPLEX_DOUBLE = 0x1,
187
+ CUFFT_CB_LD_REAL = 0x2,
188
+ CUFFT_CB_LD_REAL_DOUBLE = 0x3,
189
+ CUFFT_CB_ST_COMPLEX = 0x4,
190
+ CUFFT_CB_ST_COMPLEX_DOUBLE = 0x5,
191
+ CUFFT_CB_ST_REAL = 0x6,
192
+ CUFFT_CB_ST_REAL_DOUBLE = 0x7,
193
+ CUFFT_CB_UNDEFINED = 0x8
194
+
195
+ } cufftXtCallbackType;
196
+
197
+ typedef cufftComplex (*cufftCallbackLoadC)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer);
198
+ typedef cufftDoubleComplex (*cufftCallbackLoadZ)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer);
199
+ typedef cufftReal (*cufftCallbackLoadR)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer);
200
+ typedef cufftDoubleReal(*cufftCallbackLoadD)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer);
201
+
202
+ typedef void (*cufftCallbackStoreC)(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPointer);
203
+ typedef void (*cufftCallbackStoreZ)(void *dataOut, size_t offset, cufftDoubleComplex element, void *callerInfo, void *sharedPointer);
204
+ typedef void (*cufftCallbackStoreR)(void *dataOut, size_t offset, cufftReal element, void *callerInfo, void *sharedPointer);
205
+ typedef void (*cufftCallbackStoreD)(void *dataOut, size_t offset, cufftDoubleReal element, void *callerInfo, void *sharedPointer);
206
+
207
+
208
+ cufftResult CUFFTAPI cufftXtSetCallback(cufftHandle plan, void **callback_routine, cufftXtCallbackType cbType, void **caller_info);
209
+ cufftResult CUFFTAPI cufftXtClearCallback(cufftHandle plan, cufftXtCallbackType cbType);
210
+ cufftResult CUFFTAPI cufftXtSetCallbackSharedSize(cufftHandle plan, cufftXtCallbackType cbType, size_t sharedSize);
211
+
212
+ cufftResult CUFFTAPI cufftXtMakePlanMany(cufftHandle plan,
213
+ int rank,
214
+ long long int *n,
215
+ long long int *inembed,
216
+ long long int istride,
217
+ long long int idist,
218
+ cudaDataType inputtype,
219
+ long long int *onembed,
220
+ long long int ostride,
221
+ long long int odist,
222
+ cudaDataType outputtype,
223
+ long long int batch,
224
+ size_t *workSize,
225
+ cudaDataType executiontype);
226
+
227
+ cufftResult CUFFTAPI cufftXtGetSizeMany(cufftHandle plan,
228
+ int rank,
229
+ long long int *n,
230
+ long long int *inembed,
231
+ long long int istride,
232
+ long long int idist,
233
+ cudaDataType inputtype,
234
+ long long int *onembed,
235
+ long long int ostride,
236
+ long long int odist,
237
+ cudaDataType outputtype,
238
+ long long int batch,
239
+ size_t *workSize,
240
+ cudaDataType executiontype);
241
+
242
+
243
+ cufftResult CUFFTAPI cufftXtExec(cufftHandle plan,
244
+ void *input,
245
+ void *output,
246
+ int direction);
247
+
248
+ cufftResult CUFFTAPI cufftXtExecDescriptor(cufftHandle plan,
249
+ cudaLibXtDesc *input,
250
+ cudaLibXtDesc *output,
251
+ int direction);
252
+
253
+ cufftResult CUFFTAPI cufftXtSetWorkAreaPolicy(cufftHandle plan, cufftXtWorkAreaPolicy policy, size_t *workSize);
254
+
255
+ cufftResult CUFFTAPI cufftXtSetDistribution(cufftHandle plan,
256
+ int rank,
257
+ const long long int* lower_input,
258
+ const long long int* upper_input,
259
+ const long long int* lower_output,
260
+ const long long int* upper_output,
261
+ const long long int* strides_input,
262
+ const long long int* strides_output);
263
+
264
+ #ifdef __cplusplus
265
+ }
266
+ #endif
267
+
268
+ #endif
llava_next/lib/python3.10/site-packages/nvidia/cufft/include/cufftw.h ADDED
@@ -0,0 +1,454 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Copyright 2005-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*!
51
+ * \file cufftw.h
52
+ * \brief Public header file for the NVIDIA CUDA FFTW library (CUFFTW)
53
+ */
54
+
55
+ #ifndef _CUFFTW_H_
56
+ #define _CUFFTW_H_
57
+
58
+
59
+ #include <stdio.h>
60
+ #include "cufft.h"
61
+
62
+ #ifdef __cplusplus
63
+ extern "C" {
64
+ #endif
65
+
66
+ // transform direction
67
+ #define FFTW_FORWARD -1
68
+ #define FFTW_INVERSE 1
69
+ #define FFTW_BACKWARD 1
70
+
71
+ // Planner flags
72
+
73
+ #define FFTW_ESTIMATE 0x01
74
+ #define FFTW_MEASURE 0x02
75
+ #define FFTW_PATIENT 0x03
76
+ #define FFTW_EXHAUSTIVE 0x04
77
+ #define FFTW_WISDOM_ONLY 0x05
78
+
79
+ //Algorithm restriction flags
80
+
81
+ #define FFTW_DESTROY_INPUT 0x08
82
+ #define FFTW_PRESERVE_INPUT 0x0C
83
+ #define FFTW_UNALIGNED 0x10
84
+
85
+ // CUFFTW defines and supports the following data types
86
+
87
+ // note if complex.h has been included we use the C99 complex types
88
+ #if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined (complex)
89
+ typedef double _Complex fftw_complex;
90
+ typedef float _Complex fftwf_complex;
91
+ #else
92
+ typedef double fftw_complex[2];
93
+ typedef float fftwf_complex[2];
94
+ #endif
95
+
96
+ typedef void *fftw_plan;
97
+
98
+ typedef void *fftwf_plan;
99
+
100
+ typedef struct {
101
+ int n;
102
+ int is;
103
+ int os;
104
+ } fftw_iodim;
105
+
106
+ typedef fftw_iodim fftwf_iodim;
107
+
108
+ typedef struct {
109
+ ptrdiff_t n;
110
+ ptrdiff_t is;
111
+ ptrdiff_t os;
112
+ } fftw_iodim64;
113
+
114
+ typedef fftw_iodim64 fftwf_iodim64;
115
+
116
+
117
+ // CUFFTW defines and supports the following double precision APIs
118
+
119
+
120
+ fftw_plan CUFFTAPI fftw_plan_dft_1d(int n,
121
+ fftw_complex *in,
122
+ fftw_complex *out,
123
+ int sign,
124
+ unsigned flags);
125
+
126
+ fftw_plan CUFFTAPI fftw_plan_dft_2d(int n0,
127
+ int n1,
128
+ fftw_complex *in,
129
+ fftw_complex *out,
130
+ int sign,
131
+ unsigned flags);
132
+
133
+ fftw_plan CUFFTAPI fftw_plan_dft_3d(int n0,
134
+ int n1,
135
+ int n2,
136
+ fftw_complex *in,
137
+ fftw_complex *out,
138
+ int sign,
139
+ unsigned flags);
140
+
141
+ fftw_plan CUFFTAPI fftw_plan_dft(int rank,
142
+ const int *n,
143
+ fftw_complex *in,
144
+ fftw_complex *out,
145
+ int sign,
146
+ unsigned flags);
147
+
148
+ fftw_plan CUFFTAPI fftw_plan_dft_r2c_1d(int n,
149
+ double *in,
150
+ fftw_complex *out,
151
+ unsigned flags);
152
+
153
+ fftw_plan CUFFTAPI fftw_plan_dft_r2c_2d(int n0,
154
+ int n1,
155
+ double *in,
156
+ fftw_complex *out,
157
+ unsigned flags);
158
+
159
+ fftw_plan CUFFTAPI fftw_plan_dft_r2c_3d(int n0,
160
+ int n1,
161
+ int n2,
162
+ double *in,
163
+ fftw_complex *out,
164
+ unsigned flags);
165
+
166
+ fftw_plan CUFFTAPI fftw_plan_dft_r2c(int rank,
167
+ const int *n,
168
+ double *in,
169
+ fftw_complex *out,
170
+ unsigned flags);
171
+
172
+ fftw_plan CUFFTAPI fftw_plan_dft_c2r_1d(int n,
173
+ fftw_complex *in,
174
+ double *out,
175
+ unsigned flags);
176
+
177
+ fftw_plan CUFFTAPI fftw_plan_dft_c2r_2d(int n0,
178
+ int n1,
179
+ fftw_complex *in,
180
+ double *out,
181
+ unsigned flags);
182
+
183
+ fftw_plan CUFFTAPI fftw_plan_dft_c2r_3d(int n0,
184
+ int n1,
185
+ int n2,
186
+ fftw_complex *in,
187
+ double *out,
188
+ unsigned flags);
189
+
190
+ fftw_plan CUFFTAPI fftw_plan_dft_c2r(int rank,
191
+ const int *n,
192
+ fftw_complex *in,
193
+ double *out,
194
+ unsigned flags);
195
+
196
+
197
+ fftw_plan CUFFTAPI fftw_plan_many_dft(int rank,
198
+ const int *n,
199
+ int batch,
200
+ fftw_complex *in,
201
+ const int *inembed, int istride, int idist,
202
+ fftw_complex *out,
203
+ const int *onembed, int ostride, int odist,
204
+ int sign, unsigned flags);
205
+
206
+ fftw_plan CUFFTAPI fftw_plan_many_dft_r2c(int rank,
207
+ const int *n,
208
+ int batch,
209
+ double *in,
210
+ const int *inembed, int istride, int idist,
211
+ fftw_complex *out,
212
+ const int *onembed, int ostride, int odist,
213
+ unsigned flags);
214
+
215
+ fftw_plan CUFFTAPI fftw_plan_many_dft_c2r(int rank,
216
+ const int *n,
217
+ int batch,
218
+ fftw_complex *in,
219
+ const int *inembed, int istride, int idist,
220
+ double *out,
221
+ const int *onembed, int ostride, int odist,
222
+ unsigned flags);
223
+
224
+ fftw_plan CUFFTAPI fftw_plan_guru_dft(int rank, const fftw_iodim *dims,
225
+ int batch_rank, const fftw_iodim *batch_dims,
226
+ fftw_complex *in, fftw_complex *out,
227
+ int sign, unsigned flags);
228
+
229
+ fftw_plan CUFFTAPI fftw_plan_guru_dft_r2c(int rank, const fftw_iodim *dims,
230
+ int batch_rank, const fftw_iodim *batch_dims,
231
+ double *in, fftw_complex *out,
232
+ unsigned flags);
233
+
234
+ fftw_plan CUFFTAPI fftw_plan_guru_dft_c2r(int rank, const fftw_iodim *dims,
235
+ int batch_rank, const fftw_iodim *batch_dims,
236
+ fftw_complex *in, double *out,
237
+ unsigned flags);
238
+
239
+ void CUFFTAPI fftw_execute(const fftw_plan plan);
240
+
241
+ void CUFFTAPI fftw_execute_dft(const fftw_plan plan,
242
+ fftw_complex *idata,
243
+ fftw_complex *odata);
244
+
245
+ void CUFFTAPI fftw_execute_dft_r2c(const fftw_plan plan,
246
+ double *idata,
247
+ fftw_complex *odata);
248
+
249
+ void CUFFTAPI fftw_execute_dft_c2r(const fftw_plan plan,
250
+ fftw_complex *idata,
251
+ double *odata);
252
+
253
+
254
+ // CUFFTW defines and supports the following single precision APIs
255
+
256
+ fftwf_plan CUFFTAPI fftwf_plan_dft_1d(int n,
257
+ fftwf_complex *in,
258
+ fftwf_complex *out,
259
+ int sign,
260
+ unsigned flags);
261
+
262
+ fftwf_plan CUFFTAPI fftwf_plan_dft_2d(int n0,
263
+ int n1,
264
+ fftwf_complex *in,
265
+ fftwf_complex *out,
266
+ int sign,
267
+ unsigned flags);
268
+
269
+ fftwf_plan CUFFTAPI fftwf_plan_dft_3d(int n0,
270
+ int n1,
271
+ int n2,
272
+ fftwf_complex *in,
273
+ fftwf_complex *out,
274
+ int sign,
275
+ unsigned flags);
276
+
277
+ fftwf_plan CUFFTAPI fftwf_plan_dft(int rank,
278
+ const int *n,
279
+ fftwf_complex *in,
280
+ fftwf_complex *out,
281
+ int sign,
282
+ unsigned flags);
283
+
284
+ fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_1d(int n,
285
+ float *in,
286
+ fftwf_complex *out,
287
+ unsigned flags);
288
+
289
+ fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_2d(int n0,
290
+ int n1,
291
+ float *in,
292
+ fftwf_complex *out,
293
+ unsigned flags);
294
+
295
+ fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_3d(int n0,
296
+ int n1,
297
+ int n2,
298
+ float *in,
299
+ fftwf_complex *out,
300
+ unsigned flags);
301
+
302
+ fftwf_plan CUFFTAPI fftwf_plan_dft_r2c(int rank,
303
+ const int *n,
304
+ float *in,
305
+ fftwf_complex *out,
306
+ unsigned flags);
307
+
308
+ fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_1d(int n,
309
+ fftwf_complex *in,
310
+ float *out,
311
+ unsigned flags);
312
+
313
+ fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_2d(int n0,
314
+ int n1,
315
+ fftwf_complex *in,
316
+ float *out,
317
+ unsigned flags);
318
+
319
+ fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_3d(int n0,
320
+ int n1,
321
+ int n2,
322
+ fftwf_complex *in,
323
+ float *out,
324
+ unsigned flags);
325
+
326
+ fftwf_plan CUFFTAPI fftwf_plan_dft_c2r(int rank,
327
+ const int *n,
328
+ fftwf_complex *in,
329
+ float *out,
330
+ unsigned flags);
331
+
332
+ fftwf_plan CUFFTAPI fftwf_plan_many_dft(int rank,
333
+ const int *n,
334
+ int batch,
335
+ fftwf_complex *in,
336
+ const int *inembed, int istride, int idist,
337
+ fftwf_complex *out,
338
+ const int *onembed, int ostride, int odist,
339
+ int sign, unsigned flags);
340
+
341
+ fftwf_plan CUFFTAPI fftwf_plan_many_dft_r2c(int rank,
342
+ const int *n,
343
+ int batch,
344
+ float *in,
345
+ const int *inembed, int istride, int idist,
346
+ fftwf_complex *out,
347
+ const int *onembed, int ostride, int odist,
348
+ unsigned flags);
349
+
350
+ fftwf_plan CUFFTAPI fftwf_plan_many_dft_c2r(int rank,
351
+ const int *n,
352
+ int batch,
353
+ fftwf_complex *in,
354
+ const int *inembed, int istride, int idist,
355
+ float *out,
356
+ const int *onembed, int ostride, int odist,
357
+ unsigned flags);
358
+
359
+ fftwf_plan CUFFTAPI fftwf_plan_guru_dft(int rank, const fftwf_iodim *dims,
360
+ int batch_rank, const fftwf_iodim *batch_dims,
361
+ fftwf_complex *in, fftwf_complex *out,
362
+ int sign, unsigned flags);
363
+
364
+ fftwf_plan CUFFTAPI fftwf_plan_guru_dft_r2c(int rank, const fftwf_iodim *dims,
365
+ int batch_rank, const fftwf_iodim *batch_dims,
366
+ float *in, fftwf_complex *out,
367
+ unsigned flags);
368
+
369
+ fftwf_plan CUFFTAPI fftwf_plan_guru_dft_c2r(int rank, const fftwf_iodim *dims,
370
+ int batch_rank, const fftwf_iodim *batch_dims,
371
+ fftwf_complex *in, float *out,
372
+ unsigned flags);
373
+
374
+ void CUFFTAPI fftwf_execute(const fftw_plan plan);
375
+
376
+ void CUFFTAPI fftwf_execute_dft(const fftwf_plan plan,
377
+ fftwf_complex *idata,
378
+ fftwf_complex *odata);
379
+
380
+ void CUFFTAPI fftwf_execute_dft_r2c(const fftwf_plan plan,
381
+ float *idata,
382
+ fftwf_complex *odata);
383
+
384
+ void CUFFTAPI fftwf_execute_dft_c2r(const fftwf_plan plan,
385
+ fftwf_complex *idata,
386
+ float *odata);
387
+
388
+ /// CUFFTW 64-bit Guru Interface
389
+ /// dp
390
+ fftw_plan CUFFTAPI fftw_plan_guru64_dft(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, fftw_complex* in, fftw_complex* out, int sign, unsigned flags);
391
+
392
+ fftw_plan CUFFTAPI fftw_plan_guru64_dft_r2c(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, double* in, fftw_complex* out, unsigned flags);
393
+
394
+ fftw_plan CUFFTAPI fftw_plan_guru64_dft_c2r(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, fftw_complex* in, double* out, unsigned flags);
395
+
396
+ /// sp
397
+ fftwf_plan CUFFTAPI fftwf_plan_guru64_dft(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, fftwf_complex* in, fftwf_complex* out, int sign, unsigned flags);
398
+
399
+ fftwf_plan CUFFTAPI fftwf_plan_guru64_dft_r2c(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, float* in, fftwf_complex* out, unsigned flags);
400
+
401
+ fftwf_plan CUFFTAPI fftwf_plan_guru64_dft_c2r(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, fftwf_complex* in, float* out, unsigned flags);
402
+
403
+ #ifdef _WIN32
404
+ #define _CUFFTAPI(T) T CUFFTAPI
405
+ #else
406
+ #define _CUFFTAPI(T) CUFFTAPI T
407
+ #endif
408
+
409
+ // CUFFTW defines and supports the following support APIs
410
+ _CUFFTAPI(void *) fftw_malloc(size_t n);
411
+
412
+ _CUFFTAPI(void *) fftwf_malloc(size_t n);
413
+
414
+ void CUFFTAPI fftw_free(void *pointer);
415
+
416
+ void CUFFTAPI fftwf_free(void *pointer);
417
+
418
+ void CUFFTAPI fftw_export_wisdom_to_file(FILE * output_file);
419
+
420
+ void CUFFTAPI fftwf_export_wisdom_to_file(FILE * output_file);
421
+
422
+ void CUFFTAPI fftw_import_wisdom_from_file(FILE * input_file);
423
+
424
+ void CUFFTAPI fftwf_import_wisdom_from_file(FILE * input_file);
425
+
426
+ void CUFFTAPI fftw_print_plan(const fftw_plan plan);
427
+
428
+ void CUFFTAPI fftwf_print_plan(const fftwf_plan plan);
429
+
430
+ void CUFFTAPI fftw_set_timelimit(double seconds);
431
+
432
+ void CUFFTAPI fftwf_set_timelimit(double seconds);
433
+
434
+ double CUFFTAPI fftw_cost(const fftw_plan plan);
435
+
436
+ double CUFFTAPI fftwf_cost(const fftw_plan plan);
437
+
438
+ void CUFFTAPI fftw_flops(const fftw_plan plan, double *add, double *mul, double *fma);
439
+
440
+ void CUFFTAPI fftwf_flops(const fftw_plan plan, double *add, double *mul, double *fma);
441
+
442
+ void CUFFTAPI fftw_destroy_plan(fftw_plan plan);
443
+
444
+ void CUFFTAPI fftwf_destroy_plan(fftwf_plan plan);
445
+
446
+ void CUFFTAPI fftw_cleanup(void);
447
+
448
+ void CUFFTAPI fftwf_cleanup(void);
449
+
450
+ #ifdef __cplusplus
451
+ }
452
+ #endif
453
+
454
+ #endif /* _CUFFTW_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab06d9dfcfaf88ec2bcfb4c16b76ff0bf3b2728370d212e28607f53e1d40eff5
3
+ size 1614344
parrot/lib/python3.10/site-packages/pyarrow/libarrow_python.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2348f3a31de750d197ceb0dff6a53f2820358a8f062841d2f70130c3e60ae59d
3
+ size 2848728
parrot/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc ADDED
Binary file (10 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc ADDED
Binary file (18.6 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc ADDED
Binary file (28.3 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py ADDED
@@ -0,0 +1,374 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert BEiT checkpoints from the unilm repository."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from datasets import load_dataset
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import (
29
+ BeitConfig,
30
+ BeitForImageClassification,
31
+ BeitForMaskedImageModeling,
32
+ BeitForSemanticSegmentation,
33
+ BeitImageProcessor,
34
+ )
35
+ from transformers.image_utils import PILImageResampling
36
+ from transformers.utils import logging
37
+
38
+
39
+ logging.set_verbosity_info()
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ # here we list all keys to be renamed (original name on the left, our name on the right)
44
+ def create_rename_keys(config, has_lm_head=False, is_semantic=False):
45
+ prefix = "backbone." if is_semantic else ""
46
+
47
+ rename_keys = []
48
+ for i in range(config.num_hidden_layers):
49
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
50
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
51
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
52
+ rename_keys.append(
53
+ (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
54
+ )
55
+ rename_keys.append(
56
+ (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
57
+ )
58
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
59
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
60
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
61
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
62
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
63
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
64
+
65
+ # projection layer + position embeddings
66
+ rename_keys.extend(
67
+ [
68
+ (f"{prefix}cls_token", "beit.embeddings.cls_token"),
69
+ (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
70
+ (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
71
+ ]
72
+ )
73
+
74
+ if has_lm_head:
75
+ # mask token + shared relative position bias + layernorm
76
+ rename_keys.extend(
77
+ [
78
+ ("mask_token", "beit.embeddings.mask_token"),
79
+ (
80
+ "rel_pos_bias.relative_position_bias_table",
81
+ "beit.encoder.relative_position_bias.relative_position_bias_table",
82
+ ),
83
+ (
84
+ "rel_pos_bias.relative_position_index",
85
+ "beit.encoder.relative_position_bias.relative_position_index",
86
+ ),
87
+ ("norm.weight", "layernorm.weight"),
88
+ ("norm.bias", "layernorm.bias"),
89
+ ]
90
+ )
91
+ elif is_semantic:
92
+ # semantic segmentation classification heads
93
+ rename_keys.extend(
94
+ [
95
+ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
96
+ ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
97
+ ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
98
+ ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
99
+ ]
100
+ )
101
+ else:
102
+ # layernorm + classification head
103
+ rename_keys.extend(
104
+ [
105
+ ("fc_norm.weight", "beit.pooler.layernorm.weight"),
106
+ ("fc_norm.bias", "beit.pooler.layernorm.bias"),
107
+ ("head.weight", "classifier.weight"),
108
+ ("head.bias", "classifier.bias"),
109
+ ]
110
+ )
111
+
112
+ return rename_keys
113
+
114
+
115
+ # we split up the matrix of each encoder layer into queries, keys and values
116
+ def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
117
+ for i in range(config.num_hidden_layers):
118
+ prefix = "backbone." if is_semantic else ""
119
+ # queries, keys and values
120
+ in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
121
+ q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
122
+ v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
123
+
124
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
125
+ : config.hidden_size, :
126
+ ]
127
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
128
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
129
+ config.hidden_size : config.hidden_size * 2, :
130
+ ]
131
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
132
+ -config.hidden_size :, :
133
+ ]
134
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
135
+
136
+ # gamma_1 and gamma_2
137
+ # we call them lambda because otherwise they are renamed when using .from_pretrained
138
+ gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
139
+ gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
140
+
141
+ state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
142
+ state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
143
+
144
+ # relative_position bias table + index
145
+ if not has_lm_head:
146
+ # each layer has its own relative position bias
147
+ table = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_bias_table")
148
+ index = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_index")
149
+
150
+ state_dict[
151
+ f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table"
152
+ ] = table
153
+ state_dict[
154
+ f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index"
155
+ ] = index
156
+
157
+
158
+ def rename_key(dct, old, new):
159
+ val = dct.pop(old)
160
+ dct[new] = val
161
+
162
+
163
+ # We will verify our results on an image of cute cats
164
+ def prepare_img():
165
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
166
+ im = Image.open(requests.get(url, stream=True).raw)
167
+ return im
168
+
169
+
170
+ @torch.no_grad()
171
+ def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path):
172
+ """
173
+ Copy/paste/tweak model's weights to our BEiT structure.
174
+ """
175
+
176
+ # define default BEiT configuration
177
+ config = BeitConfig()
178
+ has_lm_head = False
179
+ is_semantic = False
180
+ repo_id = "huggingface/label-files"
181
+ # set config parameters based on URL
182
+ if checkpoint_url[-9:-4] == "pt22k":
183
+ # masked image modeling
184
+ config.use_shared_relative_position_bias = True
185
+ config.use_mask_token = True
186
+ has_lm_head = True
187
+ elif checkpoint_url[-9:-4] == "ft22k":
188
+ # intermediate fine-tuning on ImageNet-22k
189
+ config.use_relative_position_bias = True
190
+ config.num_labels = 21841
191
+ filename = "imagenet-22k-id2label.json"
192
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
193
+ id2label = {int(k): v for k, v in id2label.items()}
194
+ # this dataset contains 21843 labels but the model only has 21841
195
+ # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
196
+ del id2label[9205]
197
+ del id2label[15027]
198
+ config.id2label = id2label
199
+ config.label2id = {v: k for k, v in id2label.items()}
200
+ elif checkpoint_url[-8:-4] == "to1k":
201
+ # fine-tuning on ImageNet-1k
202
+ config.use_relative_position_bias = True
203
+ config.num_labels = 1000
204
+ filename = "imagenet-1k-id2label.json"
205
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
206
+ id2label = {int(k): v for k, v in id2label.items()}
207
+ config.id2label = id2label
208
+ config.label2id = {v: k for k, v in id2label.items()}
209
+ if "384" in checkpoint_url:
210
+ config.image_size = 384
211
+ if "512" in checkpoint_url:
212
+ config.image_size = 512
213
+ elif "ade20k" in checkpoint_url:
214
+ # fine-tuning
215
+ config.use_relative_position_bias = True
216
+ config.num_labels = 150
217
+ filename = "ade20k-id2label.json"
218
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
219
+ id2label = {int(k): v for k, v in id2label.items()}
220
+ config.id2label = id2label
221
+ config.label2id = {v: k for k, v in id2label.items()}
222
+ config.image_size = 640
223
+ is_semantic = True
224
+ else:
225
+ raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k', 'to1k' or 'ade20k'")
226
+
227
+ # size of the architecture
228
+ if "base" in checkpoint_url:
229
+ pass
230
+ elif "large" in checkpoint_url:
231
+ config.hidden_size = 1024
232
+ config.intermediate_size = 4096
233
+ config.num_hidden_layers = 24
234
+ config.num_attention_heads = 16
235
+ if "ade20k" in checkpoint_url:
236
+ config.image_size = 640
237
+ config.out_indices = [7, 11, 15, 23]
238
+ else:
239
+ raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
240
+
241
+ # load state_dict of original model, remove and rename some keys
242
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)
243
+ state_dict = state_dict["model"] if "ade20k" not in checkpoint_url else state_dict["state_dict"]
244
+
245
+ rename_keys = create_rename_keys(config, has_lm_head=has_lm_head, is_semantic=is_semantic)
246
+ for src, dest in rename_keys:
247
+ rename_key(state_dict, src, dest)
248
+ read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head, is_semantic=is_semantic)
249
+ if is_semantic:
250
+ # add prefix to decoder keys
251
+ for key, val in state_dict.copy().items():
252
+ val = state_dict.pop(key)
253
+ if key.startswith("backbone.fpn"):
254
+ key = key.replace("backbone.fpn", "fpn")
255
+ state_dict[key] = val
256
+
257
+ # load HuggingFace model
258
+ if checkpoint_url[-9:-4] == "pt22k":
259
+ model = BeitForMaskedImageModeling(config)
260
+ elif "ade20k" in checkpoint_url:
261
+ model = BeitForSemanticSegmentation(config)
262
+ else:
263
+ model = BeitForImageClassification(config)
264
+ model.eval()
265
+ model.load_state_dict(state_dict)
266
+
267
+ # Check outputs on an image
268
+ if is_semantic:
269
+ image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False)
270
+ ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
271
+ image = Image.open(ds[0]["file"])
272
+ else:
273
+ image_processor = BeitImageProcessor(
274
+ size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
275
+ )
276
+ image = prepare_img()
277
+
278
+ encoding = image_processor(images=image, return_tensors="pt")
279
+ pixel_values = encoding["pixel_values"]
280
+
281
+ outputs = model(pixel_values)
282
+ logits = outputs.logits
283
+
284
+ # verify logits
285
+ expected_shape = torch.Size([1, 1000])
286
+ if checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k"):
287
+ expected_shape = torch.Size([1, 196, 8192])
288
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k"):
289
+ expected_shape = torch.Size([1, 196, 8192])
290
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22k"):
291
+ expected_shape = torch.Size([1, 21841])
292
+ expected_logits = torch.tensor([2.2288, 2.4671, 0.7395])
293
+ expected_class_idx = 2397
294
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22k"):
295
+ expected_shape = torch.Size([1, 21841])
296
+ expected_logits = torch.tensor([1.6881, -0.2787, 0.5901])
297
+ expected_class_idx = 2396
298
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft1k"):
299
+ expected_logits = torch.tensor([0.1241, 0.0798, -0.6569])
300
+ expected_class_idx = 285
301
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22kto1k"):
302
+ expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108])
303
+ expected_class_idx = 281
304
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_384_pt22k_ft22kto1k"):
305
+ expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147])
306
+ expected_class_idx = 761
307
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft1k"):
308
+ expected_logits = torch.tensor([0.4610, -0.0928, 0.2086])
309
+ expected_class_idx = 761
310
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22kto1k"):
311
+ expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837])
312
+ expected_class_idx = 761
313
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_384_pt22k_ft22kto1k"):
314
+ expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]])
315
+ expected_class_idx = 761
316
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_512_pt22k_ft22kto1k"):
317
+ expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852])
318
+ expected_class_idx = 761
319
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_640_pt22k_ft22ktoade20k"):
320
+ expected_shape = (1, 150, 160, 160)
321
+ expected_logits = torch.tensor(
322
+ [
323
+ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
324
+ [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
325
+ [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
326
+ ]
327
+ )
328
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_640_pt22k_ft22ktoade20k"):
329
+ expected_shape = (1, 150, 160, 160)
330
+ expected_logits = torch.tensor(
331
+ [
332
+ [[-4.3305, -2.3049, -3.0161], [-2.9591, -1.5305, -2.2251], [-3.4198, -1.8004, -2.9062]],
333
+ [[-5.8922, -3.7435, -4.3978], [-4.2063, -2.7872, -3.4755], [-4.2791, -3.1874, -4.1681]],
334
+ [[0.9895, 4.3467, 4.7663], [4.2476, 5.6830, 6.1518], [4.5550, 6.2495, 6.5154]],
335
+ ]
336
+ )
337
+ else:
338
+ raise ValueError("Can't verify logits as model is not supported")
339
+
340
+ if logits.shape != expected_shape:
341
+ raise ValueError(f"Shape of logits not as expected. {logits.shape=}, {expected_shape=}")
342
+ if not has_lm_head:
343
+ if is_semantic:
344
+ if not torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-3):
345
+ raise ValueError("First elements of logits not as expected")
346
+ else:
347
+ print("Predicted class idx:", logits.argmax(-1).item())
348
+
349
+ if not torch.allclose(logits[0, :3], expected_logits, atol=1e-3):
350
+ raise ValueError("First elements of logits not as expected")
351
+ if logits.argmax(-1).item() != expected_class_idx:
352
+ raise ValueError("Predicted class index not as expected")
353
+
354
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
355
+ print(f"Saving model to {pytorch_dump_folder_path}")
356
+ model.save_pretrained(pytorch_dump_folder_path)
357
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
358
+ image_processor.save_pretrained(pytorch_dump_folder_path)
359
+
360
+
361
+ if __name__ == "__main__":
362
+ parser = argparse.ArgumentParser()
363
+
364
+ parser.add_argument(
365
+ "--checkpoint_url",
366
+ default="https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth",
367
+ type=str,
368
+ help="URL to the original PyTorch checkpoint (.pth file).",
369
+ )
370
+ parser.add_argument(
371
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
372
+ )
373
+ args = parser.parse_args()
374
+ convert_beit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
parrot/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py ADDED
@@ -0,0 +1,948 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 Microsoft Research and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from typing import Callable, List, Optional, Tuple
18
+
19
+ import flax
20
+ import flax.linen as nn
21
+ import jax
22
+ import jax.numpy as jnp
23
+ import numpy as np
24
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
25
+ from flax.linen.attention import dot_product_attention_weights
26
+ from flax.traverse_util import flatten_dict, unflatten_dict
27
+
28
+ from ...modeling_flax_outputs import (
29
+ FlaxBaseModelOutput,
30
+ FlaxBaseModelOutputWithPooling,
31
+ FlaxMaskedLMOutput,
32
+ FlaxSequenceClassifierOutput,
33
+ )
34
+ from ...modeling_flax_utils import (
35
+ ACT2FN,
36
+ FlaxPreTrainedModel,
37
+ append_replace_return_docstrings,
38
+ overwrite_call_docstring,
39
+ )
40
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward
41
+ from .configuration_beit import BeitConfig
42
+
43
+
44
+ @flax.struct.dataclass
45
+ class FlaxBeitModelOutputWithPooling(FlaxBaseModelOutputWithPooling):
46
+ """
47
+ Class for outputs of [`FlaxBeitModel`].
48
+
49
+ Args:
50
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
51
+ Sequence of hidden-states at the output of the last layer of the model.
52
+ pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
53
+ Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
54
+ *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
55
+ will be returned.
56
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
57
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
58
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
59
+ the initial embedding outputs.
60
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
61
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
62
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
63
+ the self-attention heads.
64
+ """
65
+
66
+
67
+ BEIT_START_DOCSTRING = r"""
68
+
69
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
70
+ library implements for all its model (such as downloading, saving and converting weights from PyTorch models)
71
+
72
+ This model is also a
73
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
74
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
75
+ behavior.
76
+
77
+ Finally, this model supports inherent JAX features such as:
78
+
79
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
80
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
81
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
82
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
83
+
84
+ Parameters:
85
+ config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
86
+ Initializing with a config file does not load the weights associated with the model, only the
87
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
88
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
89
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
90
+ `jax.numpy.bfloat16` (on TPUs).
91
+
92
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
93
+ specified all the computation will be performed with the given `dtype`.
94
+
95
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
96
+ parameters.**
97
+
98
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
99
+ [`~FlaxPreTrainedModel.to_bf16`].
100
+ """
101
+
102
+ BEIT_INPUTS_DOCSTRING = r"""
103
+ Args:
104
+ pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
105
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
106
+ [`AutoImageProcessor.__call__`] for details.
107
+
108
+ output_attentions (`bool`, *optional*):
109
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
110
+ tensors for more detail.
111
+ output_hidden_states (`bool`, *optional*):
112
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
113
+ more detail.
114
+ return_dict (`bool`, *optional*):
115
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
116
+ """
117
+
118
+
119
+ def relative_position_index_init(window_size: Tuple[int, int]) -> jnp.ndarray:
120
+ """
121
+ get pair-wise relative position index for each token inside the window
122
+ """
123
+ num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
124
+
125
+ coords_h = np.arange(window_size[0])
126
+ coords_w = np.arange(window_size[1])
127
+ coords = np.stack(np.meshgrid(coords_h, coords_w, indexing="ij")) # 2, Wh, Ww
128
+ coords_flatten = np.reshape(coords, (2, -1))
129
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
130
+ relative_coords = np.transpose(relative_coords, (1, 2, 0)) # Wh*Ww, Wh*Ww, 2
131
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
132
+ relative_coords[:, :, 1] += window_size[1] - 1
133
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
134
+
135
+ relative_position_index = np.zeros(shape=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
136
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
137
+ relative_position_index[0, 0:] = num_relative_distance - 3
138
+ relative_position_index[0:, 0] = num_relative_distance - 2
139
+ relative_position_index[0, 0] = num_relative_distance - 1
140
+ return jnp.array(relative_position_index)
141
+
142
+
143
+ def ones_with_scale(key, shape, scale, dtype=jnp.float32):
144
+ return jnp.ones(shape, dtype) * scale
145
+
146
+
147
+ class FlaxBeitDropPath(nn.Module):
148
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
149
+
150
+ rate: float
151
+
152
+ @nn.module.compact
153
+ def __call__(self, inputs, deterministic: Optional[bool] = True):
154
+ if self.rate == 0.0:
155
+ return inputs
156
+ keep_prob = 1.0 - self.rate
157
+ if deterministic:
158
+ return inputs
159
+ else:
160
+ shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
161
+ rng = self.make_rng("droppath")
162
+ random_tensor = keep_prob + jax.random.uniform(rng, shape=shape, dtype=inputs.dtype)
163
+ binary_tensor = jnp.floor(random_tensor)
164
+ output = inputs / keep_prob * binary_tensor
165
+ return output
166
+
167
+
168
+ class FlaxBeitPatchEmbeddings(nn.Module):
169
+ config: BeitConfig
170
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
171
+
172
+ def setup(self):
173
+ self.num_channels = self.config.num_channels
174
+ image_size = self.config.image_size
175
+ patch_size = self.config.patch_size
176
+ num_patches = (image_size // patch_size) * (image_size // patch_size)
177
+ patch_shape = (image_size // patch_size, image_size // patch_size)
178
+ self.num_patches = num_patches
179
+ self.patch_shape = patch_shape
180
+ self.projection = nn.Conv(
181
+ self.config.hidden_size,
182
+ kernel_size=(patch_size, patch_size),
183
+ strides=(patch_size, patch_size),
184
+ padding="VALID",
185
+ dtype=self.dtype,
186
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
187
+ )
188
+
189
+ def __call__(self, pixel_values):
190
+ num_channels = pixel_values.shape[-1]
191
+ if num_channels != self.num_channels:
192
+ raise ValueError(
193
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
194
+ )
195
+ embeddings = self.projection(pixel_values)
196
+ batch_size, _, _, channels = embeddings.shape
197
+ return jnp.reshape(embeddings, (batch_size, -1, channels))
198
+
199
+
200
+ class FlaxBeitEmbeddings(nn.Module):
201
+ """Construct the CLS token, position and patch embeddings."""
202
+
203
+ config: BeitConfig
204
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
205
+
206
+ def setup(self):
207
+ self.cls_token = self.param("cls_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
208
+ if self.config.use_mask_token:
209
+ self.mask_token = self.param("mask_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
210
+ self.patch_embeddings = FlaxBeitPatchEmbeddings(self.config, dtype=self.dtype)
211
+ num_patches = self.patch_embeddings.num_patches
212
+ if self.config.use_absolute_position_embeddings:
213
+ self.position_embeddings = self.param(
214
+ "position_embeddings", nn.initializers.zeros, (1, num_patches + 1, self.config.hidden_size)
215
+ )
216
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
217
+
218
+ def __call__(self, pixel_values, bool_masked_pos=None, deterministic=True):
219
+ embeddings = self.patch_embeddings(pixel_values)
220
+ batch_size, seq_len, _ = embeddings.shape
221
+
222
+ cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size))
223
+ cls_tokens = cls_tokens.astype(embeddings.dtype)
224
+
225
+ if bool_masked_pos is not None:
226
+ mask_tokens = jnp.broadcast_to(self.mask_token, (batch_size, seq_len, self.config.hidden_size))
227
+ mask_tokens = mask_tokens.astype(embeddings.dtype)
228
+ # replace the masked visual tokens by mask_tokens
229
+ w = jnp.expand_dims(bool_masked_pos, axis=-1)
230
+ embeddings = embeddings * (1 - w) + mask_tokens * w
231
+
232
+ embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1)
233
+
234
+ if self.config.use_absolute_position_embeddings:
235
+ embeddings = embeddings + self.position_embeddings.astype(embeddings.dtype)
236
+
237
+ embeddings = self.dropout(embeddings, deterministic=deterministic)
238
+ return embeddings
239
+
240
+
241
class FlaxBeitRelativePositionBias(nn.Module):
    """Learned relative position bias added to BEiT attention logits."""

    config: BeitConfig
    window_size: Tuple[int, int]  # (patches_h, patches_w) of the patch grid
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # One table row per possible relative offset, plus 3 extra rows for the
        # cls-to-token, token-to-cls and cls-to-cls interactions.
        num_relative_distance = (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) + 3
        self.relative_position_bias_table = self.param(
            "relative_position_bias_table",
            nn.initializers.zeros,
            (num_relative_distance, self.config.num_attention_heads),
        )  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token 2 cls & cls to cls

        # Precomputed lookup mapping each (query, key) position pair to a table row.
        self.relative_position_index = relative_position_index_init(self.window_size)

    def __call__(self):
        # Gather per-pair biases and lay them out as (num_heads, seq_len, seq_len),
        # where seq_len = Wh*Ww + 1 (the +1 is the [CLS] slot).
        index = self.relative_position_index.reshape(-1)
        shape = (self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1)
        relative_position_bias = self.relative_position_bias_table[index].reshape(shape)  # Wh*Ww,Wh*Ww,nH
        return jnp.transpose(relative_position_bias, (2, 0, 1))
262
+
263
+
264
class FlaxBeitSelfAttention(nn.Module):
    """Multi-head self-attention for BEiT.

    When `window_size` is truthy, a per-layer relative position bias module is
    created and its output is added to the attention logits; an additional
    shared bias may be passed at call time and is summed on top.
    """

    config: BeitConfig
    window_size: Tuple[int, int]
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        if self.config.hidden_size % self.config.num_attention_heads != 0 and not hasattr(
            self.config, "embedding_size"
        ):
            # Fix: the message previously interpolated `{self.config.hidden_size,}`;
            # the stray comma rendered the value as a one-element tuple, e.g. "(768,)".
            raise ValueError(
                f"The hidden size {self.config.hidden_size} is not a multiple of the number of attention "
                f"heads {self.config.num_attention_heads}."
            )

        self.query = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        # BEiT uses no bias on the key projection.
        self.key = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            use_bias=False,
        )
        self.value = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )

        self.relative_position_bias = (
            FlaxBeitRelativePositionBias(self.config, window_size=self.window_size, dtype=self.dtype)
            if self.window_size
            else None
        )

    def __call__(
        self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
    ):
        head_dim = self.config.hidden_size // self.config.num_attention_heads

        # Project and split into heads: (batch, seq, num_heads, head_dim).
        query_states = self.query(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )
        value_states = self.value(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )
        key_states = self.key(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )

        dropout_rng = None
        if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
            dropout_rng = self.make_rng("dropout")

        attention_bias = jnp.array(0.0, dtype=self.dtype)
        # Add relative position bias if present.
        if self.relative_position_bias is not None:
            attention_bias = jnp.expand_dims(self.relative_position_bias(), 0)
            attention_bias = attention_bias.astype(query_states.dtype)

        # Add shared relative position bias if provided.
        if relative_position_bias is not None:
            attention_bias = attention_bias + relative_position_bias.astype(attention_bias.dtype)

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.config.attention_probs_dropout_prob,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        # Weighted sum over values, then merge the heads back into hidden_size.
        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))

        outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
        return outputs
347
+
348
+
349
class FlaxBeitSelfOutput(nn.Module):
    """Projects the attention result back to the hidden size, then applies dropout."""

    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, hidden_states, deterministic: bool = True):
        projected = self.dense(hidden_states)
        return self.dropout(projected, deterministic=deterministic)
365
+
366
+
367
class FlaxBeitAttention(nn.Module):
    """Self-attention followed by its output projection."""

    config: BeitConfig
    window_size: Tuple[int, int]
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.attention = FlaxBeitSelfAttention(self.config, self.window_size, dtype=self.dtype)
        self.output = FlaxBeitSelfOutput(self.config, dtype=self.dtype)

    def __call__(
        self, hidden_states, relative_position_bias=None, deterministic=True, output_attentions: bool = False
    ):
        attn_outputs = self.attention(
            hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
        )
        projected = self.output(attn_outputs[0], deterministic=deterministic)

        # Re-attach the attention probabilities when the caller asked for them.
        if output_attentions:
            return (projected,) + attn_outputs[1:]
        return (projected,)
391
+
392
+
393
class FlaxBeitIntermediate(nn.Module):
    """Feed-forward expansion: dense projection to the intermediate size plus activation."""

    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.intermediate_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.activation = ACT2FN[self.config.hidden_act]

    def __call__(self, hidden_states):
        return self.activation(self.dense(hidden_states))
410
+
411
+
412
class FlaxBeitOutput(nn.Module):
    """Feed-forward contraction back to the hidden size, followed by dropout."""

    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, hidden_states, deterministic: bool = True):
        contracted = self.dense(hidden_states)
        return self.dropout(contracted, deterministic=deterministic)
429
+
430
+
431
class FlaxBeitLayer(nn.Module):
    """One BEiT transformer block: pre-LN attention and MLP, each with optional
    LayerScale (lambda_1 / lambda_2) and stochastic depth (DropPath)."""

    config: BeitConfig
    window_size: Tuple[int, int]
    drop_path_rate: float  # stochastic-depth rate for this layer
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.attention = FlaxBeitAttention(self.config, self.window_size, dtype=self.dtype)
        self.intermediate = FlaxBeitIntermediate(self.config, dtype=self.dtype)
        self.output = FlaxBeitOutput(self.config, dtype=self.dtype)
        self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.drop_path = FlaxBeitDropPath(rate=self.drop_path_rate)
        self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

        # LayerScale: learnable per-channel scales; disabled entirely when
        # layer_scale_init_value <= 0.
        self.init_values = self.config.layer_scale_init_value
        if self.init_values > 0:
            # NOTE(review): `(self.config.hidden_size)` is an int, not a 1-tuple;
            # presumably `ones_with_scale` accepts a scalar shape — confirm.
            self.lambda_1 = self.param("lambda_1", ones_with_scale, (self.config.hidden_size), self.init_values)
            self.lambda_2 = self.param("lambda_2", ones_with_scale, (self.config.hidden_size), self.init_values)
        else:
            self.lambda_1 = None
            self.lambda_2 = None

    def __call__(
        self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
    ):
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in BEiT, layernorm is applied before self-attention
            relative_position_bias,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]

        # apply lambda_1 if present
        if self.lambda_1 is not None:
            attention_output = self.lambda_1.astype(attention_output.dtype) * attention_output

        # first residual connection
        hidden_states = self.drop_path(attention_output, deterministic=deterministic) + hidden_states

        # in BEiT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)

        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output, deterministic=deterministic)

        # apply lambda_2 if present
        if self.lambda_2 is not None:
            layer_output = self.lambda_2.astype(layer_output.dtype) * layer_output

        # second residual connection
        layer_output = self.drop_path(layer_output, deterministic=deterministic) + hidden_states

        outputs = (layer_output,)

        if output_attentions:
            outputs += (self_attention_outputs[1],)

        return outputs
490
+
491
+
492
class FlaxBeitLayerCollection(nn.Module):
    """Stack of `num_hidden_layers` FlaxBeitLayer blocks.

    Attributes:
        config: model configuration.
        window_size: patch grid shape, forwarded to per-layer relative bias
            modules when `config.use_relative_position_bias` is set.
        drop_path_rates: per-layer stochastic-depth rates.
        relative_position_bias: optional zero-arg callable producing the
            *shared* relative position bias added at every layer.
        dtype: dtype of the computation.
    """

    config: BeitConfig
    window_size: Tuple[int, int]
    drop_path_rates: List[float]
    relative_position_bias: Callable[[], jnp.ndarray]
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxBeitLayer(
                self.config,
                window_size=self.window_size if self.config.use_relative_position_bias else None,
                drop_path_rate=self.drop_path_rates[i],
                name=str(i),
                dtype=self.dtype,
            )
            for i in range(self.config.num_hidden_layers)
        ]

    def __call__(
        self,
        hidden_states,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        # The shared bias does not depend on the layer index, so evaluate it once
        # (it was previously recomputed inside the loop on every iteration).
        relative_position_bias = self.relative_position_bias() if self.relative_position_bias is not None else None

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = layer(
                hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions += (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        # NOTE(review): the tuple path returns only the last hidden state; the
        # hidden-state/attention collections are dropped — matches the original behavior.
        outputs = (hidden_states,)
        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
545
+
546
+
547
class FlaxBeitEncoder(nn.Module):
    """BEiT encoder: optional shared relative position bias plus the layer stack."""

    config: BeitConfig
    window_size: Tuple[int, int]
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # Only defined when the bias is shared across layers; the attribute is
        # only read under the same config flag below, so this is safe.
        if self.config.use_shared_relative_position_bias:
            self.relative_position_bias = FlaxBeitRelativePositionBias(
                config=self.config, window_size=self.window_size, dtype=self.dtype
            )

        # stochastic depth decay rule
        drop_path_rates = list(np.linspace(0, self.config.drop_path_rate, self.config.num_hidden_layers))
        self.layer = FlaxBeitLayerCollection(
            self.config,
            window_size=self.window_size,
            drop_path_rates=drop_path_rates,
            relative_position_bias=self.relative_position_bias
            if self.config.use_shared_relative_position_bias
            else None,
            dtype=self.dtype,
        )

    def __call__(
        self,
        hidden_states,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Pure delegation to the layer collection.
        return self.layer(
            hidden_states,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
585
+
586
+
587
class FlaxBeitPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BeitConfig
    base_model_prefix = "beit"
    main_input_name = "pixel_values"
    module_class: nn.Module = None  # set by concrete subclasses

    def __init__(
        self,
        config: BeitConfig,
        input_shape=None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
    Fv    **kwargs,
    ):
        """Instantiate the Flax module and hand off to the base class."""
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        # Default init shape: a single channels-last image at the configured size.
        if input_shape is None:
            input_shape = (1, config.image_size, config.image_size, config.num_channels)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialize the parameter tree, keeping any weights already provided."""
        # init input tensors
        pixel_values = jnp.zeros(input_shape, dtype=self.dtype)

        # Separate streams for params, dropout, and stochastic depth ("droppath").
        params_rng, dropout_rng = jax.random.split(rng)
        dropout_rng, droppath_rng = jax.random.split(dropout_rng)
        rngs = {"params": params_rng, "dropout": dropout_rng, "droppath": droppath_rng}

        random_params = self.module.init(rngs, pixel_values, return_dict=False)["params"]

        if params is not None:
            # Fill in only the keys missing from the provided checkpoint.
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        pixel_values,
        bool_masked_pos=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # Convert NCHW (transformers convention) to NHWC (Flax convention).
        pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            dropout_rng, droppath_rng = jax.random.split(dropout_rng)
            rngs["dropout"] = dropout_rng
            rngs["droppath"] = droppath_rng

        # Positional args must match the module's __call__ order:
        # (pixel_values, bool_masked_pos, deterministic, ...).
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(pixel_values, dtype=jnp.float32),
            bool_masked_pos,
            not train,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )
668
+
669
+
670
class FlaxBeitPooler(nn.Module):
    """Pools the encoder output: layernormed mean of patch tokens, or the raw [CLS] state."""

    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        if self.config.use_mean_pooling:
            self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states):
        if not self.config.use_mean_pooling:
            # CLS pooling: the final hidden state at position 0.
            return hidden_states[:, 0]
        # Mean pooling over every token except [CLS], then layer-norm.
        return self.layernorm(jnp.mean(hidden_states[:, 1:, :], axis=1))
688
+
689
+
690
class FlaxBeitModule(nn.Module):
    """Core BEiT model: embeddings + encoder, with optional final layernorm and pooler."""

    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    add_pooling_layer: bool = True

    def setup(self):
        self.embeddings = FlaxBeitEmbeddings(self.config, dtype=self.dtype)
        self.encoder = FlaxBeitEncoder(
            self.config, window_size=self.embeddings.patch_embeddings.patch_shape, dtype=self.dtype
        )
        # With mean pooling, normalization happens inside the pooler instead.
        if not self.config.use_mean_pooling:
            self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.pooler = FlaxBeitPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None

    def __call__(
        self,
        pixel_values,
        bool_masked_pos=None,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        hidden_states = self.embeddings(pixel_values, bool_masked_pos, deterministic=deterministic)

        outputs = self.encoder(
            hidden_states,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        if not self.config.use_mean_pooling:
            hidden_states = self.layernorm(hidden_states)
        pooled = self.pooler(hidden_states) if self.add_pooling_layer else None

        if not return_dict:
            # if pooled is None, don't return it
            if pooled is None:
                return (hidden_states,) + outputs[1:]
            return (hidden_states, pooled) + outputs[1:]

        return FlaxBeitModelOutputWithPooling(
            last_hidden_state=hidden_states,
            pooler_output=pooled,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
739
+
740
+
741
@add_start_docstrings(
    "The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
    BEIT_START_DOCSTRING,
)
class FlaxBeitModel(FlaxBeitPreTrainedModel):
    # Thin public wrapper: all computation lives in FlaxBeitModule;
    # FlaxBeitPreTrainedModel supplies the init/apply plumbing.
    module_class = FlaxBeitModule
747
+
748
+
749
# Usage example spliced into FlaxBeitModel.__call__'s docstring below.
FLAX_BEIT_MODEL_DOCSTRING = """
    Returns:

    Examples:

    ```python
    >>> from transformers import AutoImageProcessor, FlaxBeitModel
    >>> from PIL import Image
    >>> import requests

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")
    >>> model = FlaxBeitModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")

    >>> inputs = image_processor(images=image, return_tensors="np")
    >>> outputs = model(**inputs)
    >>> last_hidden_states = outputs.last_hidden_state
    ```
"""

# Attach the example and return-type documentation to the public class.
overwrite_call_docstring(FlaxBeitModel, FLAX_BEIT_MODEL_DOCSTRING)
append_replace_return_docstrings(FlaxBeitModel, output_type=FlaxBeitModelOutputWithPooling, config_class=BeitConfig)
773
+
774
+
775
class FlaxBeitForMaskedImageModelingModule(nn.Module):
    """BEiT with a visual-token LM head for masked image modeling pretraining."""

    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.beit = FlaxBeitModule(self.config, add_pooling_layer=False, dtype=self.dtype)

        # Classifier head
        self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.lm_head = nn.Dense(
            self.config.vocab_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )

    def __call__(
        self,
        pixel_values=None,
        bool_masked_pos=None,
        deterministic: bool = True,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.beit(
            pixel_values,
            bool_masked_pos,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        sequence_output = self.layernorm(sequence_output)
        # Predict visual tokens for patch positions only (drop the [CLS] slot).
        prediction_scores = self.lm_head(sequence_output[:, 1:])

        if not return_dict:
            # NOTE(review): with add_pooling_layer=False the tuple returned by
            # FlaxBeitModule has no pooler entry, so `outputs[2:]` may skip one
            # element — confirm against the return_dict path.
            output = (prediction_scores,) + outputs[2:]
            return output

        return FlaxMaskedLMOutput(
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
823
+
824
+
825
@add_start_docstrings(
    "Beit Model transformer with a 'language' modeling head on top (to predict visual tokens).",
    BEIT_START_DOCSTRING,
)
class FlaxBeitForMaskedImageModeling(FlaxBeitPreTrainedModel):
    # Thin public wrapper; logic lives in FlaxBeitForMaskedImageModelingModule.
    module_class = FlaxBeitForMaskedImageModelingModule
831
+
832
+
833
# Usage example spliced into FlaxBeitForMaskedImageModeling.__call__'s docstring.
# Fix: the example previously imported the PyTorch class `BeitForMaskedImageModeling`
# inside this Flax-only module's docstring; it now uses the Flax class.
FLAX_BEIT_MLM_DOCSTRING = """
    bool_masked_pos (`numpy.ndarray` of shape `(batch_size, num_patches)`):
        Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

    Returns:

    Examples:

    ```python
    >>> from transformers import AutoImageProcessor, FlaxBeitForMaskedImageModeling
    >>> from PIL import Image
    >>> import requests

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
    >>> model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

    >>> inputs = image_processor(images=image, return_tensors="np")
    >>> outputs = model(**inputs)
    >>> logits = outputs.logits
    ```
"""

overwrite_call_docstring(FlaxBeitForMaskedImageModeling, FLAX_BEIT_MLM_DOCSTRING)
append_replace_return_docstrings(
    FlaxBeitForMaskedImageModeling, output_type=FlaxMaskedLMOutput, config_class=BeitConfig
)
862
+
863
+
864
class FlaxBeitForImageClassificationModule(nn.Module):
    """BEiT with a linear image-classification head on the pooled output."""

    config: BeitConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.beit = FlaxBeitModule(config=self.config, dtype=self.dtype, add_pooling_layer=True)
        self.classifier = nn.Dense(
            self.config.num_labels,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )

    def __call__(
        self,
        pixel_values=None,
        bool_masked_pos=None,  # accepted for signature parity; not forwarded to the backbone
        deterministic: bool = True,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.beit(
            pixel_values,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pooled representation (mean of patch tokens or [CLS], per config).
        pooled_output = outputs[1]
        logits = self.classifier(pooled_output)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return output

        return FlaxSequenceClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
907
+
908
+
909
@add_start_docstrings(
    """
    Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
    hidden states of the patch tokens) e.g. for ImageNet.
    """,
    BEIT_START_DOCSTRING,
)
class FlaxBeitForImageClassification(FlaxBeitPreTrainedModel):
    # Thin public wrapper; logic lives in FlaxBeitForImageClassificationModule.
    module_class = FlaxBeitForImageClassificationModule
918
+
919
+
920
# Usage example spliced into FlaxBeitForImageClassification.__call__'s docstring.
FLAX_BEIT_CLASSIF_DOCSTRING = """
    Returns:

    Example:

    ```python
    >>> from transformers import AutoImageProcessor, FlaxBeitForImageClassification
    >>> from PIL import Image
    >>> import requests

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    >>> model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

    >>> inputs = image_processor(images=image, return_tensors="np")
    >>> outputs = model(**inputs)
    >>> logits = outputs.logits
    >>> # model predicts one of the 1000 ImageNet classes
    >>> predicted_class_idx = logits.argmax(-1).item()
    >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
    ```
"""

# Attach the example and return-type documentation to the public class.
overwrite_call_docstring(FlaxBeitForImageClassification, FLAX_BEIT_CLASSIF_DOCSTRING)
append_replace_return_docstrings(
    FlaxBeitForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=BeitConfig
)
parrot/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Public symbols exposed by this sub-package, keyed by submodule name.
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}


if TYPE_CHECKING:
    # Real import only for static type checkers; at runtime the lazy module
    # below defers the submodule import until first attribute access.
    from .tokenization_bertweet import BertweetTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
parrot/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (497 Bytes). View file
 
parrot/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
3
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Tokenization classes for BERTweet"""
17
+
18
+
19
+ import html
20
+ import os
21
+ import re
22
+ from shutil import copyfile
23
+ from typing import List, Optional, Tuple
24
+
25
+ import regex
26
+
27
+ from ...tokenization_utils import PreTrainedTokenizer
28
+ from ...utils import logging
29
+
30
+
31
logger = logging.get_logger(__name__)

# Filenames expected inside a pretrained BERTweet checkpoint directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
+
38
+
39
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    An empty or single-symbol word yields the empty set (the original raised
    IndexError on empty input and the redundant `pairs = set(pairs)` is gone).
    """
    if not word:
        return set()
    # Pair each symbol with its successor in one C-speed pass.
    return set(zip(word, word[1:]))
53
+
54
+
55
+ class BertweetTokenizer(PreTrainedTokenizer):
56
+ """
57
+ Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.
58
+
59
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
60
+ this superclass for more information regarding those methods.
61
+
62
+ Args:
63
+ vocab_file (`str`):
64
+ Path to the vocabulary file.
65
+ merges_file (`str`):
66
+ Path to the merges file.
67
+ normalization (`bool`, *optional*, defaults to `False`):
68
+ Whether or not to apply a normalization preprocess.
69
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
70
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
71
+
72
+ <Tip>
73
+
74
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
75
+ sequence. The token used is the `cls_token`.
76
+
77
+ </Tip>
78
+
79
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
80
+ The end of sequence token.
81
+
82
+ <Tip>
83
+
84
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
85
+ The token used is the `sep_token`.
86
+
87
+ </Tip>
88
+
89
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
90
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
91
+ sequence classification or for a text and a question for question answering. It is also used as the last
92
+ token of a sequence built with special tokens.
93
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
94
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
95
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
96
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
97
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
98
+ token instead.
99
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
100
+ The token used for padding, for example when batching sequences of different lengths.
101
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
102
+ The token used for masking values. This is the token used when training this model with masked language
103
+ modeling. This is the token which the model will try to predict.
104
+ """
105
+
106
+ vocab_files_names = VOCAB_FILES_NAMES
107
+
108
    def __init__(
        self,
        vocab_file,
        merges_file,
        normalization=False,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        """Load the vocabulary and BPE merge ranks; set up optional tweet normalization."""
        # Emoji-to-text conversion is optional: without the `emoji` package,
        # emoticons/emojis are simply left untouched.
        try:
            from emoji import demojize

            self.demojizer = demojize
        except ImportError:
            logger.warning(
                "emoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3"
                " install emoji==0.6.0"
            )
            self.demojizer = None

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # Reserve the first four ids for the special tokens (fairseq-style layout);
        # the rest of the vocabulary is appended by add_from_file below.
        self.encoder = {}
        self.encoder[str(bos_token)] = 0
        self.encoder[str(pad_token)] = 1
        self.encoder[str(eos_token)] = 2
        self.encoder[str(unk_token)] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        # Each merges line is "<first> <second> <count>"; the trailing count is
        # dropped, and earlier lines get higher BPE priority (lower rank).
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}  # memoizes BPE splits per token

        self.normalization = normalization
        self.tweetPreprocessor = TweetTokenizer()
        # Unicode punctuation folded to ASCII during normalization.
        self.special_puncts = {"’": "'", "…": "..."}

        super().__init__(
            normalization=normalization,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
167
+
168
+ def build_inputs_with_special_tokens(
169
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
170
+ ) -> List[int]:
171
+ """
172
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
173
+ adding special tokens. A BERTweet sequence has the following format:
174
+
175
+ - single sequence: `<s> X </s>`
176
+ - pair of sequences: `<s> A </s></s> B </s>`
177
+
178
+ Args:
179
+ token_ids_0 (`List[int]`):
180
+ List of IDs to which the special tokens will be added.
181
+ token_ids_1 (`List[int]`, *optional*):
182
+ Optional second list of IDs for sequence pairs.
183
+
184
+ Returns:
185
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
186
+ """
187
+
188
+ if token_ids_1 is None:
189
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
190
+ cls = [self.cls_token_id]
191
+ sep = [self.sep_token_id]
192
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
193
+
194
+ def get_special_tokens_mask(
195
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
196
+ ) -> List[int]:
197
+ """
198
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
199
+ special tokens using the tokenizer `prepare_for_model` method.
200
+
201
+ Args:
202
+ token_ids_0 (`List[int]`):
203
+ List of IDs.
204
+ token_ids_1 (`List[int]`, *optional*):
205
+ Optional second list of IDs for sequence pairs.
206
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
207
+ Whether or not the token list is already formatted with special tokens for the model.
208
+
209
+ Returns:
210
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
211
+ """
212
+
213
+ if already_has_special_tokens:
214
+ return super().get_special_tokens_mask(
215
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
216
+ )
217
+
218
+ if token_ids_1 is None:
219
+ return [1] + ([0] * len(token_ids_0)) + [1]
220
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
221
+
222
+ def create_token_type_ids_from_sequences(
223
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
224
+ ) -> List[int]:
225
+ """
226
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does
227
+ not make use of token type ids, therefore a list of zeros is returned.
228
+
229
+ Args:
230
+ token_ids_0 (`List[int]`):
231
+ List of IDs.
232
+ token_ids_1 (`List[int]`, *optional*):
233
+ Optional second list of IDs for sequence pairs.
234
+
235
+ Returns:
236
+ `List[int]`: List of zeros.
237
+ """
238
+
239
+ sep = [self.sep_token_id]
240
+ cls = [self.cls_token_id]
241
+
242
+ if token_ids_1 is None:
243
+ return len(cls + token_ids_0 + sep) * [0]
244
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
245
+
246
+ @property
247
+ def vocab_size(self):
248
+ return len(self.encoder)
249
+
250
+ def get_vocab(self):
251
+ return dict(self.encoder, **self.added_tokens_encoder)
252
+
253
+ def bpe(self, token):
254
+ if token in self.cache:
255
+ return self.cache[token]
256
+ word = tuple(token)
257
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
258
+ pairs = get_pairs(word)
259
+
260
+ if not pairs:
261
+ return token
262
+
263
+ while True:
264
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
265
+ if bigram not in self.bpe_ranks:
266
+ break
267
+ first, second = bigram
268
+ new_word = []
269
+ i = 0
270
+ while i < len(word):
271
+ try:
272
+ j = word.index(first, i)
273
+ except ValueError:
274
+ new_word.extend(word[i:])
275
+ break
276
+ else:
277
+ new_word.extend(word[i:j])
278
+ i = j
279
+
280
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
281
+ new_word.append(first + second)
282
+ i += 2
283
+ else:
284
+ new_word.append(word[i])
285
+ i += 1
286
+ new_word = tuple(new_word)
287
+ word = new_word
288
+ if len(word) == 1:
289
+ break
290
+ else:
291
+ pairs = get_pairs(word)
292
+ word = "@@ ".join(word)
293
+ word = word[:-4]
294
+ self.cache[token] = word
295
+ return word
296
+
297
+ def _tokenize(self, text):
298
+ """Tokenize a string."""
299
+ if self.normalization: # Perform Tweet normalization before performing BPE
300
+ text = self.normalizeTweet(text)
301
+
302
+ split_tokens = []
303
+ words = re.findall(r"\S+\n?", text)
304
+ for token in words:
305
+ split_tokens.extend(list(self.bpe(token).split(" ")))
306
+ return split_tokens
307
+
308
+ def normalizeTweet(self, tweet):
309
+ """
310
+ Normalize a raw Tweet
311
+ """
312
+ for punct in self.special_puncts:
313
+ tweet = tweet.replace(punct, self.special_puncts[punct])
314
+
315
+ tokens = self.tweetPreprocessor.tokenize(tweet)
316
+ normTweet = " ".join([self.normalizeToken(token) for token in tokens])
317
+
318
+ normTweet = (
319
+ normTweet.replace("cannot ", "can not ")
320
+ .replace("n't ", " n't ")
321
+ .replace("n 't ", " n't ")
322
+ .replace("ca n't", "can't")
323
+ .replace("ai n't", "ain't")
324
+ )
325
+ normTweet = (
326
+ normTweet.replace("'m ", " 'm ")
327
+ .replace("'re ", " 're ")
328
+ .replace("'s ", " 's ")
329
+ .replace("'ll ", " 'll ")
330
+ .replace("'d ", " 'd ")
331
+ .replace("'ve ", " 've ")
332
+ )
333
+ normTweet = (
334
+ normTweet.replace(" p . m .", " p.m.")
335
+ .replace(" p . m ", " p.m ")
336
+ .replace(" a . m .", " a.m.")
337
+ .replace(" a . m ", " a.m ")
338
+ )
339
+
340
+ return " ".join(normTweet.split())
341
+
342
+ def normalizeToken(self, token):
343
+ """
344
+ Normalize tokens in a Tweet
345
+ """
346
+ lowercased_token = token.lower()
347
+ if token.startswith("@"):
348
+ return "@USER"
349
+ elif lowercased_token.startswith("http") or lowercased_token.startswith("www"):
350
+ return "HTTPURL"
351
+ elif len(token) == 1:
352
+ if token in self.special_puncts:
353
+ return self.special_puncts[token]
354
+ if self.demojizer is not None:
355
+ return self.demojizer(token)
356
+ else:
357
+ return token
358
+ else:
359
+ return token
360
+
361
+ def _convert_token_to_id(self, token):
362
+ """Converts a token (str) in an id using the vocab."""
363
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
364
+
365
+ def _convert_id_to_token(self, index):
366
+ """Converts an index (integer) in a token (str) using the vocab."""
367
+ return self.decoder.get(index, self.unk_token)
368
+
369
+ def convert_tokens_to_string(self, tokens):
370
+ """Converts a sequence of tokens (string) in a single string."""
371
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
372
+ return out_string
373
+
374
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
375
+ if not os.path.isdir(save_directory):
376
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
377
+ return
378
+ out_vocab_file = os.path.join(
379
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
380
+ )
381
+ out_merge_file = os.path.join(
382
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
383
+ )
384
+
385
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
386
+ copyfile(self.vocab_file, out_vocab_file)
387
+ elif not os.path.isfile(self.vocab_file):
388
+ with open(out_vocab_file, "wb") as fi:
389
+ content_spiece_model = self.sp_model.serialized_model_proto()
390
+ fi.write(content_spiece_model)
391
+
392
+ if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
393
+ copyfile(self.merges_file, out_merge_file)
394
+
395
+ return out_vocab_file, out_merge_file
396
+
397
+ # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
398
+ # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
399
+ # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
400
+ # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
401
+ # return ''.join(tokens_generated_so_far)
402
+
403
+ def add_from_file(self, f):
404
+ """
405
+ Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
406
+ """
407
+ if isinstance(f, str):
408
+ try:
409
+ with open(f, "r", encoding="utf-8") as fd:
410
+ self.add_from_file(fd)
411
+ except FileNotFoundError as fnfe:
412
+ raise fnfe
413
+ except UnicodeError:
414
+ raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
415
+ return
416
+
417
+ lines = f.readlines()
418
+ for lineTmp in lines:
419
+ line = lineTmp.strip()
420
+ idx = line.rfind(" ")
421
+ if idx == -1:
422
+ raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
423
+ word = line[:idx]
424
+ self.encoder[word] = len(self.encoder)
425
+
426
+
427
+ # Natural Language Toolkit: Twitter Tokenizer
428
+ #
429
+ # Copyright (C) 2001-2020 NLTK Project
430
+ # Author: Christopher Potts <cgpotts@stanford.edu>
431
+ # Ewan Klein <ewan@inf.ed.ac.uk> (modifications)
432
+ # Pierpaolo Pantone <> (modifications)
433
+ # URL: http://nltk.org/
434
+ # For license information, see LICENSE.TXT
435
+ #
436
+
437
+
438
+ """
439
+ Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this:
440
+
441
+ 1. The tuple regex_strings defines a list of regular expression strings.
442
+
443
+ 2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re.
444
+
445
+ 3. The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method of
446
+ the class Tokenizer.
447
+
448
+ 4. When instantiating Tokenizer objects, there is a single option: preserve_case. By default, it is set to True. If it
449
+ is set to False, then the tokenizer will lowercase everything except for emoticons.
450
+
451
+ """
452
+
453
+
454
+ ######################################################################
455
+ #
456
+ # import regex # https://github.com/nltk/nltk/issues/2409
457
+ # import html
458
+ #
459
+ ######################################################################
460
+ # The following strings are components in the regular expression
461
+ # that is used for tokenizing. It's important that phone_number
462
+ # appears first in the final regex (since it can contain whitespace).
463
+ # It also could matter that tags comes after emoticons, due to the
464
+ # possibility of having text like
465
+ #
466
+ # <:| and some text >:)
467
+ #
468
+ # Most importantly, the final element should always be last, since it
469
+ # does a last ditch whitespace-based tokenization of whatever is left.
470
+
471
+ # ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?
472
+
473
+ # This particular element is used in a couple ways, so we define it
474
+ # with a name:
475
+ # docstyle-ignore
476
+ EMOTICONS = r"""
477
+ (?:
478
+ [<>]?
479
+ [:;=8] # eyes
480
+ [\-o\*\']? # optional nose
481
+ [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
482
+ |
483
+ [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
484
+ [\-o\*\']? # optional nose
485
+ [:;=8] # eyes
486
+ [<>]?
487
+ |
488
+ <3 # heart
489
+ )"""
490
+
491
+ # URL pattern due to John Gruber, modified by Tom Winzig. See
492
+ # https://gist.github.com/winzig/8894715
493
+ # docstyle-ignore
494
+ URLS = r""" # Capture 1: entire matched URL
495
+ (?:
496
+ https?: # URL protocol and colon
497
+ (?:
498
+ /{1,3} # 1-3 slashes
499
+ | # or
500
+ [a-z0-9%] # Single letter or digit or '%'
501
+ # (Trying not to match e.g. "URI::Escape")
502
+ )
503
+ | # or
504
+ # looks like domain name followed by a slash:
505
+ [a-z0-9.\-]+[.]
506
+ (?:[a-z]{2,13})
507
+ /
508
+ )
509
+ (?: # One or more:
510
+ [^\s()<>{}\[\]]+ # Run of non-space, non-()<>{}[]
511
+ | # or
512
+ \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
513
+ |
514
+ \([^\s]+?\) # balanced parens, non-recursive: (...)
515
+ )+
516
+ (?: # End with:
517
+ \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
518
+ |
519
+ \([^\s]+?\) # balanced parens, non-recursive: (...)
520
+ | # or
521
+ [^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars
522
+ )
523
+ | # OR, the following to match naked domains:
524
+ (?:
525
+ (?<!@) # not preceded by a @, avoid matching foo@_gmail.com_
526
+ [a-z0-9]+
527
+ (?:[.\-][a-z0-9]+)*
528
+ [.]
529
+ (?:[a-z]{2,13})
530
+ \b
531
+ /?
532
+ (?!@) # not succeeded by a @,
533
+ # avoid matching "foo.na" in "foo.na@example.com"
534
+ )
535
+ """
536
+
537
+ # docstyle-ignore
538
+ # The components of the tokenizer:
539
+ REGEXPS = (
540
+ URLS,
541
+ # Phone numbers:
542
+ r"""
543
+ (?:
544
+ (?: # (international)
545
+ \+?[01]
546
+ [ *\-.\)]*
547
+ )?
548
+ (?: # (area code)
549
+ [\(]?
550
+ \d{3}
551
+ [ *\-.\)]*
552
+ )?
553
+ \d{3} # exchange
554
+ [ *\-.\)]*
555
+ \d{4} # base
556
+ )""",
557
+ # ASCII Emoticons
558
+ EMOTICONS,
559
+ # HTML tags:
560
+ r"""<[^>\s]+>""",
561
+ # ASCII Arrows
562
+ r"""[\-]+>|<[\-]+""",
563
+ # Twitter username:
564
+ r"""(?:@[\w_]+)""",
565
+ # Twitter hashtags:
566
+ r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
567
+ # email addresses
568
+ r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
569
+ # docstyle-ignore
570
+ # Remaining word types:
571
+ r"""
572
+ (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
573
+ |
574
+ (?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals.
575
+ |
576
+ (?:[\w_]+) # Words without apostrophes or dashes.
577
+ |
578
+ (?:\.(?:\s*\.){1,}) # Ellipsis dots.
579
+ |
580
+ (?:\S) # Everything else that isn't whitespace.
581
+ """,
582
+ )
583
+
584
+ ######################################################################
585
+ # This is the core tokenizing regex:
586
+
587
+ WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE)
588
+
589
+ # WORD_RE performs poorly on these patterns:
590
+ HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}")
591
+
592
+ # The emoticon string gets its own regex so that we can preserve case for
593
+ # them as needed:
594
+ EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE)
595
+
596
+ # These are for regularizing HTML entities to Unicode:
597
+ ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")
598
+
599
+
600
+ ######################################################################
601
+ # Functions for converting html entities
602
+ ######################################################################
603
+
604
+
605
+ def _str_to_unicode(text, encoding=None, errors="strict"):
606
+ if encoding is None:
607
+ encoding = "utf-8"
608
+ if isinstance(text, bytes):
609
+ return text.decode(encoding, errors)
610
+ return text
611
+
612
+
613
+ def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
614
+ """
615
+ Remove entities from text by converting them to their corresponding unicode character.
616
+
617
+ Args:
618
+ text:
619
+ A unicode string or a byte string encoded in the given *encoding* (which defaults to 'utf-8').
620
+ keep (list):
621
+ List of entity names which should not be replaced. This supports both numeric entities (`&#nnnn;` and
622
+ `&#hhhh;`) and named entities (such as `&nbsp;` or `&gt;`).
623
+ remove_illegal (bool):
624
+ If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
625
+ kept "as is".
626
+
627
+ Returns: A unicode string with the entities removed.
628
+
629
+ See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py
630
+
631
+ Examples:
632
+
633
+ ```python
634
+ >>> from nltk.tokenize.casual import _replace_html_entities
635
+
636
+ >>> _replace_html_entities(b"Price: &pound;100")
637
+ 'Price: \\xa3100'
638
+
639
+ >>> print(_replace_html_entities(b"Price: &pound;100"))
640
+ Price: £100
641
+ ```"""
642
+
643
+ def _convert_entity(match):
644
+ entity_body = match.group(3)
645
+ if match.group(1):
646
+ try:
647
+ if match.group(2):
648
+ number = int(entity_body, 16)
649
+ else:
650
+ number = int(entity_body, 10)
651
+ # Numeric character references in the 80-9F range are typically
652
+ # interpreted by browsers as representing the characters mapped
653
+ # to bytes 80-9F in the Windows-1252 encoding. For more info
654
+ # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
655
+ if 0x80 <= number <= 0x9F:
656
+ return bytes((number,)).decode("cp1252")
657
+ except ValueError:
658
+ number = None
659
+ else:
660
+ if entity_body in keep:
661
+ return match.group(0)
662
+ else:
663
+ number = html.entities.name2codepoint.get(entity_body)
664
+ if number is not None:
665
+ try:
666
+ return chr(number)
667
+ except (ValueError, OverflowError):
668
+ pass
669
+
670
+ return "" if remove_illegal else match.group(0)
671
+
672
+ return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
673
+
674
+
675
+ ######################################################################
676
+
677
+
678
+ class TweetTokenizer:
679
+ r"""
680
+ Examples:
681
+
682
+ ```python
683
+ >>> # Tokenizer for tweets.
684
+ >>> from nltk.tokenize import TweetTokenizer
685
+
686
+ >>> tknzr = TweetTokenizer()
687
+ >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
688
+ >>> tknzr.tokenize(s0)
689
+ ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
690
+
691
+ >>> # Examples using *strip_handles* and *reduce_len parameters*:
692
+ >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
693
+ >>> s1 = "@remy: This is waaaaayyyy too much for you!!!!!!"
694
+ >>> tknzr.tokenize(s1)
695
+ [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
696
+ ```"""
697
+
698
+ def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
699
+ self.preserve_case = preserve_case
700
+ self.reduce_len = reduce_len
701
+ self.strip_handles = strip_handles
702
+
703
+ def tokenize(self, text):
704
+ """
705
+ Args:
706
+ text: str
707
+
708
+ Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
709
+ `preserve_case=False`
710
+ """
711
+ # Fix HTML character entities:
712
+ text = _replace_html_entities(text)
713
+ # Remove username handles
714
+ if self.strip_handles:
715
+ text = remove_handles(text)
716
+ # Normalize word lengthening
717
+ if self.reduce_len:
718
+ text = reduce_lengthening(text)
719
+ # Shorten problematic sequences of characters
720
+ safe_text = HANG_RE.sub(r"\1\1\1", text)
721
+ # Tokenize:
722
+ words = WORD_RE.findall(safe_text)
723
+ # Possibly alter the case, but avoid changing emoticons like :D into :d:
724
+ if not self.preserve_case:
725
+ words = [x if EMOTICON_RE.search(x) else x.lower() for x in words]
726
+ return words
727
+
728
+
729
+ ######################################################################
730
+ # Normalization Functions
731
+ ######################################################################
732
+
733
+
734
+ def reduce_lengthening(text):
735
+ """
736
+ Replace repeated character sequences of length 3 or greater with sequences of length 3.
737
+ """
738
+ pattern = regex.compile(r"(.)\1{2,}")
739
+ return pattern.sub(r"\1\1\1", text)
740
+
741
+
742
+ def remove_handles(text):
743
+ """
744
+ Remove Twitter username handles from text.
745
+ """
746
+ pattern = regex.compile(
747
+ r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)"
748
+ )
749
+ # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly
750
+ return pattern.sub(" ", text)
751
+
752
+
753
+ ######################################################################
754
+ # Tokenization Function
755
+ ######################################################################
756
+
757
+
758
+ def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
759
+ """
760
+ Convenience function for wrapping the tokenizer.
761
+ """
762
+ return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len, strip_handles=strip_handles).tokenize(
763
+ text
764
+ )
765
+
766
+
767
+ ###############################################################################
parrot/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc ADDED
Binary file (6.77 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright The HuggingFace team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ConvBERT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class ConvBertConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`ConvBertModel`]. It is used to instantiate an
31
+ ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of the ConvBERT
33
+ [YituTech/conv-bert-base](https://huggingface.co/YituTech/conv-bert-base) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 30522):
41
+ Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by
42
+ the `inputs_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`].
43
+ hidden_size (`int`, *optional*, defaults to 768):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ num_hidden_layers (`int`, *optional*, defaults to 12):
46
+ Number of hidden layers in the Transformer encoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 12):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ intermediate_size (`int`, *optional*, defaults to 3072):
50
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
51
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
52
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
53
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
54
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
55
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
56
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
57
+ The dropout ratio for the attention probabilities.
58
+ max_position_embeddings (`int`, *optional*, defaults to 512):
59
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
60
+ just in case (e.g., 512 or 1024 or 2048).
61
+ type_vocab_size (`int`, *optional*, defaults to 2):
62
+ The vocabulary size of the `token_type_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`].
63
+ initializer_range (`float`, *optional*, defaults to 0.02):
64
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
65
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
66
+ The epsilon used by the layer normalization layers.
67
+ head_ratio (`int`, *optional*, defaults to 2):
68
+ Ratio gamma to reduce the number of attention heads.
69
+ num_groups (`int`, *optional*, defaults to 1):
70
+ The number of groups for grouped linear layers for ConvBert model
71
+ conv_kernel_size (`int`, *optional*, defaults to 9):
72
+ The size of the convolutional kernel.
73
+ classifier_dropout (`float`, *optional*):
74
+ The dropout ratio for the classification head.
75
+
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import ConvBertConfig, ConvBertModel
80
+
81
+ >>> # Initializing a ConvBERT convbert-base-uncased style configuration
82
+ >>> configuration = ConvBertConfig()
83
+
84
+ >>> # Initializing a model (with random weights) from the convbert-base-uncased style configuration
85
+ >>> model = ConvBertModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "convbert"
92
+
93
+ def __init__(
94
+ self,
95
+ vocab_size=30522,
96
+ hidden_size=768,
97
+ num_hidden_layers=12,
98
+ num_attention_heads=12,
99
+ intermediate_size=3072,
100
+ hidden_act="gelu",
101
+ hidden_dropout_prob=0.1,
102
+ attention_probs_dropout_prob=0.1,
103
+ max_position_embeddings=512,
104
+ type_vocab_size=2,
105
+ initializer_range=0.02,
106
+ layer_norm_eps=1e-12,
107
+ pad_token_id=1,
108
+ bos_token_id=0,
109
+ eos_token_id=2,
110
+ embedding_size=768,
111
+ head_ratio=2,
112
+ conv_kernel_size=9,
113
+ num_groups=1,
114
+ classifier_dropout=None,
115
+ **kwargs,
116
+ ):
117
+ super().__init__(
118
+ pad_token_id=pad_token_id,
119
+ bos_token_id=bos_token_id,
120
+ eos_token_id=eos_token_id,
121
+ **kwargs,
122
+ )
123
+
124
+ self.vocab_size = vocab_size
125
+ self.hidden_size = hidden_size
126
+ self.num_hidden_layers = num_hidden_layers
127
+ self.num_attention_heads = num_attention_heads
128
+ self.intermediate_size = intermediate_size
129
+ self.hidden_act = hidden_act
130
+ self.hidden_dropout_prob = hidden_dropout_prob
131
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
132
+ self.max_position_embeddings = max_position_embeddings
133
+ self.type_vocab_size = type_vocab_size
134
+ self.initializer_range = initializer_range
135
+ self.layer_norm_eps = layer_norm_eps
136
+ self.embedding_size = embedding_size
137
+ self.head_ratio = head_ratio
138
+ self.conv_kernel_size = conv_kernel_size
139
+ self.num_groups = num_groups
140
+ self.classifier_dropout = classifier_dropout
141
+
142
+
143
+ # Copied from transformers.models.bert.configuration_bert.BertOnnxConfig
144
+ class ConvBertOnnxConfig(OnnxConfig):
145
+ @property
146
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
147
+ if self.task == "multiple-choice":
148
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
149
+ else:
150
+ dynamic_axis = {0: "batch", 1: "sequence"}
151
+ return OrderedDict(
152
+ [
153
+ ("input_ids", dynamic_axis),
154
+ ("attention_mask", dynamic_axis),
155
+ ("token_type_ids", dynamic_axis),
156
+ ]
157
+ )
parrot/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ConvBERT checkpoint."""
16
+
17
+ import argparse
18
+
19
+ from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
20
+ from transformers.utils import logging
21
+
22
+
23
+ logging.set_verbosity_info()
24
+
25
+
26
+ def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
27
+ conf = ConvBertConfig.from_json_file(convbert_config_file)
28
+ model = ConvBertModel(conf)
29
+
30
+ model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
31
+ model.save_pretrained(pytorch_dump_path)
32
+
33
+ tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
34
+ tf_model.save_pretrained(pytorch_dump_path)
35
+
36
+
37
+ if __name__ == "__main__":
38
+ parser = argparse.ArgumentParser()
39
+ # Required parameters
40
+ parser.add_argument(
41
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
42
+ )
43
+ parser.add_argument(
44
+ "--convbert_config_file",
45
+ default=None,
46
+ type=str,
47
+ required=True,
48
+ help=(
49
+ "The config json file corresponding to the pre-trained ConvBERT model. \n"
50
+ "This specifies the model architecture."
51
+ ),
52
+ )
53
+ parser.add_argument(
54
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
55
+ )
56
+ args = parser.parse_args()
57
+ convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
parrot/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py ADDED
@@ -0,0 +1,503 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for ConvBERT."""
16
+ import collections
17
+ import os
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
28
+
29
+
30
# Equivalent to transformers.models.bert.tokenization_bert.load_vocab
def load_vocab(vocab_file):
    """Read a WordPiece vocabulary file (one token per line) into an ordered token->index mapping."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # File order defines the token ids; only the trailing newline is stripped,
        # so tokens containing other whitespace are preserved verbatim.
        for index, line in enumerate(reader):
            vocab[line.rstrip("\n")] = index
    return vocab
40
+
41
+
42
# Equivalent to transformers.models.bert.tokenization_bert.whitespace_tokenize
def whitespace_tokenize(text):
    """Strip *text* and split it on runs of whitespace, returning [] for blank input."""
    stripped = text.strip()
    if not stripped:
        return []
    return stripped.split()
50
+
51
+
52
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->YituTech/conv-bert-base, BertTokenizer->ConvBertTokenizer, BERT->ConvBERT
class ConvBertTokenizer(PreTrainedTokenizer):
    r"""
    Construct a ConvBERT tokenizer. Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original ConvBERT).
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        if not os.path.isfile(vocab_file):
            # Fixed: the message previously pointed users at `BertTokenizer` because the
            # "Copied from" replacement was applied in the wrong direction.
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = ConvBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        # The base class registers special tokens, so it must run after the vocab is loaded.
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

    @property
    def do_lower_case(self):
        # Source of truth is the basic tokenizer's setting.
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        # Base vocab plus any tokens added after initialization.
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text, split_special_tokens=False):
        """Tokenize *text* with the basic tokenizer (if enabled), then WordPiece each piece."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(
                text, never_split=self.all_special_tokens if not split_special_tokens else None
            ):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # Joining then deleting " ##" re-attaches WordPiece continuation pieces.
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A ConvBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary (one token per line, in id order) to *save_directory* and return the file path."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            # *save_directory* was actually a file path; write directly to it.
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    # A gap in the indices means reloading this file would assign different ids.
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
284
+
285
+
286
# Equivalent to transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split or [])
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of token not to split.
        """
        # Per-call protected tokens are merged with the instance-level set.
        protected = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # NFC-normalize so visually identical characters with different codepoints compare equal.
        normalized = unicodedata.normalize("NFC", text)
        pieces = []
        for token in normalized.split():
            if token not in protected:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            pieces.extend(self._run_split_on_punc(token, protected))

        # Re-split on whitespace to flush out any spaces introduced above.
        return " ".join(pieces).split()

    def _run_strip_accents(self, text):
        """Drop combining-mark characters (accents) after NFD decomposition."""
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text, never_split=None):
        """Split *text* so every punctuation character becomes its own piece."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        groups = []
        start_new_word = True
        for ch in text:
            if _is_punctuation(ch):
                groups.append([ch])
                start_new_word = True
            else:
                if start_new_word:
                    groups.append([])
                start_new_word = False
                groups[-1].append(ch)

        return ["".join(g) for g in groups]

    def _tokenize_chinese_chars(self, text):
        """Surround every CJK ideograph with spaces so each is tokenized on its own."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.extend((" ", ch, " "))
            else:
                out.append(ch)
        return "".join(out)

    def _is_chinese_char(self, cp):
        """Return whether codepoint *cp* lies in a CJK ideograph block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        cjk_ranges = (
            (0x4E00, 0x9FFF),
            (0x3400, 0x4DBF),
            (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F),
            (0x2B740, 0x2B81F),
            (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF),
            (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _clean_text(self, text):
        """Drop NUL/replacement/control characters and map all whitespace to single spaces."""
        cleaned = []
        for ch in text:
            cp = ord(ch)
            if cp == 0 or cp == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
446
+
447
+
448
# Behaviorally equivalent to transformers.models.bert.tokenization_bert.WordpieceTokenizer,
# but self-contained: the module-level whitespace_tokenize helper is replaced by the
# equivalent str.split(), and tokens are sliced directly instead of copied into a list.
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization.

    Attributes:
        vocab: Mapping from token string to id; only membership is used here.
        unk_token: Token emitted for words that cannot be decomposed into vocab pieces.
        max_input_chars_per_word: Words longer than this are mapped straight to *unk_token*.
    """

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        # str.split() with no arguments already ignores leading/trailing/repeated
        # whitespace, so it is equivalent to the whitespace_tokenize helper.
        for token in text.split():
            if len(token) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            # Greedy longest-match-first: repeatedly take the longest prefix (from
            # `start`) that exists in the vocab; non-initial pieces carry a "##" prefix.
            pieces = []
            is_bad = False
            start = 0
            length = len(token)
            while start < length:
                end = length
                cur_substr = None
                while start < end:
                    substr = token[start:end]
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No prefix of the remainder is in the vocab: the whole word is unknown.
                    is_bad = True
                    break
                pieces.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens
parrot/lib/python3.10/site-packages/transformers/models/dinat/__init__.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import table: configuration is importable unconditionally; modeling
# classes are registered only when PyTorch is installed.
_import_structure = {"configuration_dinat": ["DinatConfig"]}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # No torch: silently skip the modeling entries.
    pass
else:
    _import_structure["modeling_dinat"] = [
        "DinatForImageClassification",
        "DinatModel",
        "DinatPreTrainedModel",
        "DinatBackbone",
    ]

if TYPE_CHECKING:
    # Static type checkers see real (eager) imports mirroring _import_structure.
    from .configuration_dinat import DinatConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dinat import (
            DinatBackbone,
            DinatForImageClassification,
            DinatModel,
            DinatPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
parrot/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import table: configuration classes are importable unconditionally;
# modeling classes are registered only when their backend (PyTorch / TF) is installed.
_import_structure = {
    "configuration_groupvit": [
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # No torch: skip the PyTorch modeling entries.
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # No TensorFlow: skip the TF modeling entries.
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see real (eager) imports mirroring _import_structure.
    from .configuration_groupvit import (
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
parrot/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc ADDED
Binary file (15.8 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/convert_groupvit_nvlab_to_hf.cpython-310.pyc ADDED
Binary file (5.84 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_tf_groupvit.cpython-310.pyc ADDED
Binary file (63.5 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py ADDED
@@ -0,0 +1,449 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GroupViT model configuration"""
16
+
17
+ import os
18
+ from collections import OrderedDict
19
+ from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ if TYPE_CHECKING:
27
+ from ...processing_utils import ProcessorMixin
28
+ from ...utils import TensorType
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
class GroupViTTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate an
    GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the GroupViT
    [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`GroupViTModel`].
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 1):
            Id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 49406):
            Id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 49407):
            Id of the end-of-sequence token.

    Example:

    ```python
    >>> from transformers import GroupViTTextConfig, GroupViTTextModel

    >>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration
    >>> configuration = GroupViTTextConfig()

    >>> model = GroupViTTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "groupvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=256,
        intermediate_size=1024,
        num_hidden_layers=12,
        num_attention_heads=4,
        max_position_embeddings=77,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        dropout=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from GroupViTConfig
        if config_dict.get("model_type") == "groupvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
141
+
142
+
143
class GroupViTVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate
    an GroupViT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the GroupViT
    [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 384):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        depths (`List[int]`, *optional*, defaults to [6, 3, 3]):
            The number of layers in each encoder block.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            The total number of layers; expected to equal `sum(depths)`.
        num_group_tokens (`List[int]`, *optional*, defaults to [64, 8, 0]):
            The number of group tokens for each stage.
        num_output_groups (`List[int]`, *optional*, defaults to [64, 8, 8]):
            The number of output groups for each stage, 0 means no group.
        num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input image channels.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import GroupViTVisionConfig, GroupViTVisionModel

    >>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration
    >>> configuration = GroupViTVisionConfig()

    >>> model = GroupViTVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "groupvit_vision_model"

    def __init__(
        self,
        hidden_size=384,
        intermediate_size=1536,
        depths=None,
        num_hidden_layers=12,
        num_group_tokens=None,
        num_output_groups=None,
        num_attention_heads=6,
        image_size=224,
        patch_size=16,
        num_channels=3,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        dropout=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        assign_eps=1.0,
        assign_mlp_ratio=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Fix: avoid mutable default arguments — materialize the documented list defaults here
        # instead of sharing one list object across all instances.
        depths = [6, 3, 3] if depths is None else depths
        num_group_tokens = [64, 8, 0] if num_group_tokens is None else num_group_tokens
        num_output_groups = [64, 8, 8] if num_output_groups is None else num_output_groups
        assign_mlp_ratio = [0.5, 4] if assign_mlp_ratio is None else assign_mlp_ratio

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.depths = depths
        if num_hidden_layers != sum(depths):
            logger.warning(
                f"Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers ="
                f" sum(depth) = {sum(depths)}"
            )
        self.num_hidden_layers = num_hidden_layers
        self.num_group_tokens = num_group_tokens
        self.num_output_groups = num_output_groups
        self.num_attention_heads = num_attention_heads
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.assign_eps = assign_eps
        self.assign_mlp_ratio = assign_mlp_ratio

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping the `vision_config` entry of a composite GroupViT config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GroupViTConfig
        if config_dict.get("model_type") == "groupvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
266
+
267
+
268
class GroupViTConfig(PretrainedConfig):
    r"""
    [`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to
    instantiate a GroupViT model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT
    [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`GroupViTTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`GroupViTVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 256):
            Dimensionality of text and vision projection layers.
        projection_intermediate_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of intermediate layer of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original GroupViT
            implementation.
        kwargs (*optional*):
            Dictionary of keyword arguments.
    """

    model_type = "groupvit"

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=256,
        projection_intermediate_dim=4096,
        logit_scale_init_value=2.6592,
        **kwargs,
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. "
                            f'The value `text_config["{key}"]` will be overriden.'
                        )
                    logger.info(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`."
                            f' The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.info(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `GroupViTVisionConfig` with default values.")

        self.text_config = GroupViTTextConfig(**text_config)
        self.vision_config = GroupViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.projection_intermediate_dim = projection_intermediate_dim
        self.logit_scale_init_value = logit_scale_init_value
        # Fixed values (not exposed as constructor arguments).
        self.initializer_range = 0.02
        self.initializer_factor = 1.0
        self.output_segmentation = False

    @classmethod
    def from_text_vision_configs(cls, text_config: GroupViTTextConfig, vision_config: GroupViTVisionConfig, **kwargs):
        r"""
        Instantiate a [`GroupViTConfig`] (or a derived class) from groupvit text model configuration and groupvit
        vision model configuration.

        Returns:
            [`GroupViTConfig`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
404
+
405
+
406
class GroupViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the export inputs (text ids/mask and image pixels)."""
        dynamic_axes = {
            "input_ids": {0: "batch", 1: "sequence"},
            "pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"},
            "attention_mask": {0: "batch", 1: "sequence"},
        }
        return OrderedDict(dynamic_axes)

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Batch-dynamic axes for the similarity logits and the pooled embeddings."""
        dynamic_axes = {
            "logits_per_image": {0: "batch"},
            "logits_per_text": {0: "batch"},
            "text_embeds": {0: "batch"},
            "image_embeds": {0: "batch"},
        }
        return OrderedDict(dynamic_axes)

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported graph against the PyTorch model.
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Build dummy text and image inputs with the base implementation and merge them."""
        text_inputs = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_inputs = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_inputs, **image_inputs}

    @property
    def default_onnx_opset(self) -> int:
        return 14
parrot/lib/python3.10/site-packages/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Convert GroupViT checkpoints from the original repository.
18
+
19
+ URL: https://github.com/NVlabs/GroupViT
20
+ """
21
+
22
+ import argparse
23
+
24
+ import requests
25
+ import torch
26
+ from PIL import Image
27
+
28
+ from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
29
+
30
+
31
def rename_key(name):
    """Translate an original NVlabs GroupViT parameter name into its HF Transformers equivalent.

    The rules are order-sensitive: each (substring, replacement, extra_condition) rule is
    applied in sequence to the possibly already-rewritten name, so earlier rewrites can
    enable or disable later ones.
    """
    rules = (
        # vision encoder
        ("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings", None),
        ("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection", None),
        ("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm", None),
        ("img_encoder.layers", "vision_model.encoder.stages", None),
        ("blocks", "layers", lambda n: "res" not in n),
        ("attn", "self_attn", lambda n: "pre_assign" not in n),
        ("proj", "out_proj", lambda n: "self_attn" in n and "text" not in n),
        ("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj", None),
        ("norm1", "layer_norm1", None),
        ("norm2", "layer_norm2", lambda n: "pre_assign" not in n),
        ("img_encoder.norm", "vision_model.layernorm", None),
        # text encoder
        ("text_encoder.token_embedding", "text_model.embeddings.token_embedding", None),
        ("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight", None),
        ("text_encoder.transformer.resblocks.", "text_model.encoder.layers.", None),
        ("ln_1", "layer_norm1", None),
        ("ln_2", "layer_norm2", None),
        ("c_fc", "fc1", None),
        ("c_proj", "fc2", None),
        ("text_encoder", "text_model", None),
        ("ln_final", "final_layer_norm", None),
        # projection layers
        ("img_projector.linear_hidden.", "visual_projection.", None),
        ("img_projector.linear_out.", "visual_projection.3.", None),
        ("text_projector.linear_hidden", "text_projection", None),
        ("text_projector.linear_out", "text_projection.3", None),
    )
    for old, new, extra_condition in rules:
        if old in name and (extra_condition is None or extra_condition(name)):
            name = name.replace(old, new)
    return name
85
+
86
+
87
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original GroupViT state dict in place to the HF Transformers layout.

    Fused attention projections — `qkv` in the vision encoder and `in_proj` in the text
    encoder — are split into separate q/k/v tensors of size `hidden_size`; every other
    tensor is renamed via `rename_key` (projection tensors are additionally squeezed).
    """
    for key in list(orig_state_dict.keys()):
        val = orig_state_dict.pop(key)
        parts = key.split(".")

        if "qkv" in key:
            # Fused vision-attention projection: split into q/k/v chunks of size `dim`.
            stage_num, layer_num = int(parts[2]), int(parts[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            suffix = "weight" if "weight" in key else "bias"
            orig_state_dict[f"{prefix}.q_proj.{suffix}"] = val[:dim]
            orig_state_dict[f"{prefix}.k_proj.{suffix}"] = val[dim : dim * 2]
            orig_state_dict[f"{prefix}.v_proj.{suffix}"] = val[-dim:]
        elif "in_proj" in key:
            # Fused text-attention projection: same split, text-model naming.
            layer_num = int(parts[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            suffix = "weight" if "weight" in key else "bias"
            orig_state_dict[f"{prefix}.q_proj.{suffix}"] = val[:dim]
            orig_state_dict[f"{prefix}.k_proj.{suffix}"] = val[dim : dim * 2]
            orig_state_dict[f"{prefix}.v_proj.{suffix}"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # Projection tensors carry singleton dimensions in the original checkpoint.
            needs_squeeze = any(
                marker in new_name
                for marker in ("text_projection.0", "text_projection.3", "visual_projection.0", "visual_projection.3")
            )
            orig_state_dict[new_name] = val.squeeze_() if needs_squeeze else val

    return orig_state_dict
147
+
148
+
149
# We will verify our results on an image of cute cats
def prepare_img():
    """Download and return the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw)
154
+
155
+
156
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """
    Copy/paste/tweak model's weights to the Transformers design.

    Args:
        checkpoint_path: path to the original NVlabs checkpoint (a torch-serialized dict with a "model" entry).
        pytorch_dump_folder_path: directory where the converted processor and model are saved.
        model_name: either "groupvit-gcc-yfcc" or "groupvit-gcc-redcaps"; selects the expected verification logits.
        push_to_hub: when True, also pushes the converted processor and model to the Hugging Face hub.

    Raises:
        ValueError: if `model_name` is not one of the two supported names.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    # NOTE(review): torch.load unpickles the file — only run this on trusted checkpoints.
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    # Only position_ids may be missing; only multi_label_logit_scale may be left over.
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Reference logits for the cats image / two-prompt input — presumably computed with the
    # original NVlabs implementation; verify against it if this check starts failing.
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
196
+
197
+
198
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        # Fix: the previous default "groupvit-gccy-fcc" was a typo accepted by no branch of
        # `convert_groupvit_checkpoint`, so running with defaults always raised ValueError.
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
parrot/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py ADDED
@@ -0,0 +1,1582 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GroupViT model."""
16
+
17
+
18
+ import collections.abc
19
+ import math
20
+ from dataclasses import dataclass
21
+ from typing import Any, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
30
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
31
+ from ...modeling_utils import PreTrainedModel
32
+ from ...utils import (
33
+ ModelOutput,
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ logging,
37
+ replace_return_docstrings,
38
+ )
39
+ from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc"
45
+
46
+
47
+ # contrastive loss function, adapted from
48
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
49
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
50
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
51
+
52
+
53
+ # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit
54
+ def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor:
55
+ caption_loss = contrastive_loss(similarity)
56
+ image_loss = contrastive_loss(similarity.t())
57
+ return (caption_loss + image_loss) / 2.0
58
+
59
+
60
+ def hard_softmax(logits: torch.Tensor, dim: int):
61
+ y_soft = logits.softmax(dim)
62
+ # Straight through.
63
+ index = y_soft.max(dim, keepdim=True)[1]
64
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
65
+ ret = y_hard - y_soft.detach() + y_soft
66
+
67
+ return ret
68
+
69
+
70
+ def gumbel_softmax(logits: torch.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> torch.Tensor:
71
+ # more stable https://github.com/pytorch/pytorch/issues/41663
72
+ gumbel_dist = torch.distributions.gumbel.Gumbel(
73
+ torch.tensor(0.0, device=logits.device, dtype=logits.dtype),
74
+ torch.tensor(1.0, device=logits.device, dtype=logits.dtype),
75
+ )
76
+ gumbels = gumbel_dist.sample(logits.shape)
77
+
78
+ gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
79
+ y_soft = gumbels.softmax(dim)
80
+
81
+ if hard:
82
+ # Straight through.
83
+ index = y_soft.max(dim, keepdim=True)[1]
84
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
85
+ ret = y_hard - y_soft.detach() + y_soft
86
+ else:
87
+ # Reparametrization trick.
88
+ ret = y_soft
89
+ return ret
90
+
91
+
92
+ def resize_attention_map(attentions, height, width, align_corners=False):
93
+ """
94
+ Args:
95
+ attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
96
+ height (`int`): height of the output attention map
97
+ width (`int`): width of the output attention map
98
+ align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.
99
+
100
+ Returns:
101
+ `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width]
102
+ """
103
+
104
+ scale = (height * width // attentions.shape[2]) ** 0.5
105
+ if height > width:
106
+ feat_width = int(np.round(width / scale))
107
+ feat_height = attentions.shape[2] // feat_width
108
+ else:
109
+ feat_height = int(np.round(height / scale))
110
+ feat_width = attentions.shape[2] // feat_height
111
+
112
+ batch_size = attentions.shape[0]
113
+ groups = attentions.shape[1] # number of group token
114
+ # [batch_size, groups, height*width, groups] -> [batch_size, groups, height, width]
115
+ attentions = attentions.reshape(batch_size, groups, feat_height, feat_width)
116
+ attentions = nn.functional.interpolate(
117
+ attentions, size=(height, width), mode="bilinear", align_corners=align_corners
118
+ )
119
+ return attentions
120
+
121
+
122
def get_grouping_from_attentions(attentions, hw_shape):
    """
    Compose the per-stage grouping attentions into the final pixel-to-group assignment map.

    Args:
        attentions (`tuple(torch.FloatTensor)`: tuple of attention maps returned by `GroupViTVisionTransformer`,
            each of shape [batch_size, num_groups_i, height*width_i]
        hw_shape (`tuple(int)`): height and width of the output attention map
    Returns:
        `torch.Tensor`: the attention map of shape [batch_size, groups, height, width]
    """

    with torch.no_grad():
        # Chain the stage-wise assignments: multiplying the transposed per-stage
        # maps propagates each original patch position through every grouping stage.
        composed = None
        for attn_masks in attentions:
            # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups]
            attn_masks = attn_masks.permute(0, 2, 1).contiguous()
            composed = attn_masks if composed is None else composed @ attn_masks

        # Fix over the original implementation: it resized every intermediate
        # composition and kept them in a list, yet only the last one was used —
        # the per-stage `interpolate` calls were pure wasted work. Resize once.
        # [batch_size, height x width, num_groups] -> [batch_size, num_groups, height, width]
        final_grouping = resize_attention_map(composed.permute(0, 2, 1).contiguous(), *hw_shape)

    return final_grouping
149
+
150
+
151
class GroupViTCrossAttentionLayer(nn.Module):
    """Cross-attention block: the queries attend over the keys, then pass through an MLP and a post-norm."""

    def __init__(self, config: GroupViTVisionConfig):
        super().__init__()
        self.attn = GroupViTAttention(config)
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = GroupViTMLP(config)
        self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, query, key):
        """Cross-attend `query` over `key`; both residual branches feed a final LayerNorm."""
        hidden = query + self.attn(query, encoder_hidden_states=key)[0]
        hidden = hidden + self.mlp(self.norm2(hidden))
        return self.norm_post(hidden)
165
+
166
+
167
class GroupViTAssignAttention(nn.Module):
    """
    Single-head attention that assigns image tokens to group tokens.

    The assignment is normalized over the *group* axis (dim=-2), i.e. each image
    token is (hard-)assigned to a group; Gumbel noise is used during training.
    """

    def __init__(self, config: GroupViTVisionConfig):
        super().__init__()
        self.scale = config.hidden_size**-0.5

        self.q_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.k_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.v_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.proj = nn.Linear(config.hidden_size, config.hidden_size)
        # epsilon added when re-normalizing the assignment over tokens
        self.assign_eps = config.assign_eps

    def get_attn(self, attn, gumbel=True, hard=True):
        """Normalize raw scores over the group axis, optionally stochastic (Gumbel) and/or one-hot (hard)."""
        if gumbel and self.training:
            return gumbel_softmax(attn, dim=-2, hard=hard)
        if hard:
            return hard_softmax(attn, dim=-2)
        return nn.functional.softmax(attn, dim=-2)

    def forward(self, query, key):
        value = key
        # [batch_size, query_length, channels]
        query = self.q_proj(query)
        # [batch_size, key_length, channels]
        key = self.k_proj(key)
        # [batch_size, key_length, channels]
        value = self.v_proj(value)

        # [batch_size, query_length, key_length]
        raw_attn = torch.matmul(query, key.transpose(-2, -1)) * self.scale

        attn = self.get_attn(raw_attn)
        # soft variant is returned for visualization / downstream grouping maps
        soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False)

        # Re-normalize each group's (one-hot) assignment over the tokens it owns.
        attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps)

        out = self.proj(torch.matmul(attn, value))
        return out, soft_attn
213
+
214
+
215
class GroupViTTokenAssign(nn.Module):
    """Grouping block: merges the image tokens into a smaller set of output group tokens."""

    def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group):
        super().__init__()
        self.num_output_group = num_output_group
        # norm on group_tokens
        self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        ratio = config.assign_mlp_ratio
        if not isinstance(ratio, collections.abc.Iterable):
            ratio = (ratio, ratio)
        tokens_dim, channels_dim = (int(r * config.hidden_size) for r in ratio)

        # token mixer: projects num_group_token tokens down to num_output_group tokens
        self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group)
        self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # norm on x
        self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pre_assign_attn = GroupViTCrossAttentionLayer(config)

        self.assign = GroupViTAssignAttention(config)
        self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size)

    def project_group_token(self, group_tokens):
        """
        Args:
            group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]

        Returns:
            projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]
        """
        # [B, num_output_groups, C] <- [B, num_group_tokens, C]
        return self.norm_post_tokens(self.mlp_inter(group_tokens))

    def forward(self, image_tokens, group_tokens):
        """
        Args:
            image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels]
            group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
        """
        group_tokens = self.norm_tokens(group_tokens)
        image_tokens = self.norm_x(image_tokens)

        # [batch_size, num_output_groups, channels]
        projected_group_tokens = self.project_group_token(group_tokens)
        projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)

        new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
        new_image_tokens = new_image_tokens + projected_group_tokens
        new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))

        return new_image_tokens, attention
268
+
269
+
270
@dataclass
class GroupViTModelOutput(ModelOutput):
    """
    Output of [`GroupViTModel`]: contrastive logits, segmentation logits and both encoders' pooled outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
            Classification scores for each pixel.

            <Tip warning={true}>

            The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
            to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the
            original image size as post-processing. You should always check your logits shape and resize as needed.

            </Tip>

        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
            The text embeddings obtained by applying the projection layer to the pooled output of
            [`GroupViTTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
            The image embeddings obtained by applying the projection layer to the pooled output of
            [`GroupViTVisionModel`].
        text_model_output (`BaseModelOutputWithPooling`):
            The output of the [`GroupViTTextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`GroupViTVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    segmentation_logits: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    image_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        # The two nested model outputs are ModelOutputs themselves: flatten them recursively.
        nested_keys = ("text_model_output", "vision_model_output")
        return tuple(
            getattr(self, key).to_tuple() if key in nested_keys else self[key] for key in self.keys()
        )
319
+
320
+
321
class GroupViTPatchEmbeddings(nn.Module):
    """
    Image to Patch Embedding: a strided convolution turns a batch of images into a
    sequence of patch embeddings of shape [batch_size, num_patches, embed_dim].
    """

    def __init__(
        self,
        image_size: int = 224,
        patch_size: Union[int, Tuple[int, int]] = 16,
        num_channels: int = 3,
        embed_dim: int = 768,
    ):
        super().__init__()
        # Normalize scalar sizes to (height, width) pairs.
        if not isinstance(image_size, collections.abc.Iterable):
            image_size = (image_size, image_size)
        if not isinstance(patch_size, collections.abc.Iterable):
            patch_size = (patch_size, patch_size)
        self.image_size = image_size
        self.patch_size = patch_size
        # Non-overlapping patches along each axis.
        self.num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        # The fixed-size check only applies when position encodings are not interpolated.
        if not interpolate_pos_encoding and (height != self.image_size[0] or width != self.image_size[1]):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model"
                f" ({self.image_size[0]}*{self.image_size[1]})."
            )
        # [batch, embed_dim, h', w'] -> [batch, h'*w', embed_dim]
        return self.projection(pixel_values).flatten(2).transpose(1, 2)
353
+
354
+
355
class GroupViTVisionEmbeddings(nn.Module):
    """Patch embeddings + learned absolute position embeddings for the vision tower (no CLS token)."""

    def __init__(self, config: GroupViTVisionConfig):
        super().__init__()

        self.patch_embeddings = GroupViTPatchEmbeddings(
            image_size=config.image_size,
            patch_size=config.patch_size,
            num_channels=config.num_channels,
            embed_dim=config.hidden_size,
        )
        num_patches = self.patch_embeddings.num_patches
        # One learned position embedding per patch; initialized to zeros.
        self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size))
        self.dropout = nn.Dropout(config.dropout)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images.

        Source:
        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
        """

        npatch = embeddings.shape[1]
        # Fast path: same patch count and a square image -> the stored table fits as-is.
        if npatch == self.position_embeddings.shape[1] and height == width:
            return self.position_embeddings
        patch_pos_embed = self.position_embeddings
        num_original_pos_embed = patch_pos_embed.shape[1]
        dim = embeddings.shape[-1]
        feat_height = height // self.config.patch_size
        feat_width = width // self.config.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        feat_height, feat_width = feat_height + 0.1, feat_width + 0.1
        # NOTE: the pre-trained position grid is assumed to be square (side = sqrt(count)).
        original_height = original_width = math.sqrt(num_original_pos_embed)
        # [1, N, dim] -> [1, dim, H0, W0] for spatial interpolation.
        reshaped_patch_pos_embed = patch_pos_embed.reshape(1, int(original_height), int(original_width), dim).permute(
            0, 3, 1, 2
        )
        scale_factor = (feat_height / original_height, feat_width / original_width)
        patch_pos_embed = nn.functional.interpolate(
            reshaped_patch_pos_embed,
            scale_factor=scale_factor,
            mode="bicubic",
            align_corners=False,
        )
        # [1, dim, H', W'] -> [1, H'*W', dim] to match the token sequence layout.
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return patch_pos_embed

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        # LayerNorm is applied to the patch embeddings *before* adding positions.
        embeddings = self.layernorm(embeddings)

        batch_size, seq_len, _ = embeddings.size()

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings
422
+
423
+
424
class GroupViTTextEmbeddings(nn.Module):
    """CLIP-style token + learned absolute position embeddings for the text encoder."""

    def __init__(self, config: GroupViTTextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        # Sequence length is taken from whichever of input_ids / inputs_embeds was provided.
        if input_ids is not None:
            seq_length = input_ids.shape[-1]
        else:
            seq_length = inputs_embeds.shape[-2]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        return inputs_embeds + self.position_embedding(position_ids)
456
+
457
+
458
class GroupViTStage(nn.Module):
    """This corresponds to the `GroupingLayer` class in the GroupViT implementation."""

    def __init__(
        self,
        config: GroupViTVisionConfig,
        depth: int,
        num_prev_group_token: int,
        num_group_token: int,
        num_output_group: int,
    ):
        super().__init__()
        self.depth = depth
        self.num_group_token = num_group_token
        # Learnable group tokens, appended to the image tokens for this stage (if any).
        if num_group_token > 0:
            self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size))
        else:
            self.group_token = None
        self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)])

        # Grouping head that merges image tokens into num_output_group tokens.
        if num_group_token > 0:
            self.downsample = GroupViTTokenAssign(
                config=config,
                num_group_token=num_group_token,
                num_output_group=num_output_group,
            )
        else:
            self.downsample = None

        # Projects the previous stage's group tokens onto this stage's group tokens.
        if num_prev_group_token > 0 and num_group_token > 0:
            self.group_projector = nn.Sequential(
                nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps),
                GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token),
            )
        else:
            self.group_projector = None

    @property
    def with_group_token(self):
        return self.group_token is not None

    def split_x(self, x):
        """Split the concatenated sequence back into (image tokens, group tokens)."""
        if not self.with_group_token:
            return x, None
        boundary = x.shape[1] - self.num_group_token
        return x[:, :boundary], x[:, boundary:]

    def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Append the group tokens to the image tokens along the sequence axis."""
        if group_token is None:
            return x
        return torch.cat([x, group_token], dim=1)

    def forward(
        self,
        hidden_states: torch.Tensor,
        prev_group_token: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            prev_group_token (`torch.FloatTensor`, *optional*): group tokens produced by the previous stage.
            output_attentions (`bool`, *optional*):
                Whether or not to return the grouping tensors of Grouping block.
        """
        group_token = None
        if self.with_group_token:
            group_token = self.group_token.expand(hidden_states.size(0), -1, -1)
            if self.group_projector is not None:
                group_token = group_token + self.group_projector(prev_group_token)

        cat_x = self.concat_x(hidden_states, group_token)
        for layer in self.layers:
            cat_x = layer(cat_x, attention_mask=None, causal_attention_mask=None)[0]

        x, group_token = self.split_x(cat_x)

        attention = None
        if self.downsample is not None:
            x, attention = self.downsample(x, group_token)

        if output_attentions:
            return (x, group_token, attention)
        return (x, group_token)
550
+
551
+
552
class GroupViTMLP(nn.Module):
    """Two-layer feed-forward block; dimensions default to the config's hidden/intermediate sizes."""

    def __init__(
        self,
        config: GroupViTVisionConfig,
        hidden_size: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        output_size: Optional[int] = None,
    ):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        if hidden_size is None:
            hidden_size = config.hidden_size
        if intermediate_size is None:
            intermediate_size = config.intermediate_size
        if output_size is None:
            # default: project back to the input width
            output_size = hidden_size
        self.fc1 = nn.Linear(hidden_size, intermediate_size)
        self.fc2 = nn.Linear(intermediate_size, output_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.fc2(self.activation_fn(self.fc1(hidden_states)))
574
+
575
+
576
class GroupViTMixerMLP(GroupViTMLP):
    """GroupViTMLP applied across the token axis: mixes tokens instead of channels."""

    def forward(self, x):
        mixed = super().forward(x.transpose(1, 2))
        return mixed.transpose(1, 2)
580
+
581
+
582
class GroupViTAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        # 1/sqrt(head_dim); folded into the query projection in forward().
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # [bsz, seq_len, embed_dim] -> [bsz, num_heads, seq_len, head_dim]
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()
        # When encoder_hidden_states is given this acts as cross-attention:
        # queries come from hidden_states, keys/values from encoder_hidden_states.
        is_cross_attention = encoder_hidden_states is not None

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        if is_cross_attention:
            key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
        else:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        # Collapse batch and head dims so attention runs as a single bmm.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            # Masks are additive (large negative values on masked positions).
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit akward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        # Dropout on the attention probabilities (only active in training mode).
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        # Un-collapse heads and merge them back into the embedding dimension.
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped
690
+
691
+
692
class GroupViTEncoderLayer(nn.Module):
    """Pre-norm transformer encoder layer (self-attention followed by an MLP), as in CLIP."""

    def __init__(self, config: GroupViTConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = GroupViTAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = GroupViTMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            causal_attention_mask (`torch.FloatTensor`): causal mask of the same additive form.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Self-attention branch (pre-norm + residual).
        attn_out, attn_weights = self.self_attn(
            hidden_states=self.layer_norm1(hidden_states),
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + attn_out

        # Feed-forward branch (pre-norm + residual).
        hidden_states = hidden_states + self.mlp(self.layer_norm2(hidden_states))

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
741
+
742
+
743
class GroupViTPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GroupViTConfig
    base_model_prefix = "groupvit"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        init_range = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # The TF reference uses truncated_normal; plain normal is the accepted
            # approximation here (cf. https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=init_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

        factor = self.config.initializer_factor
        if isinstance(module, GroupViTTextEmbeddings):
            embedding_std = factor * 0.02
            module.token_embedding.weight.data.normal_(mean=0.0, std=embedding_std)
            module.position_embedding.weight.data.normal_(mean=0.0, std=embedding_std)
        elif isinstance(module, GroupViTAttention):
            # Depth-scaled init as in CLIP: in-projections shrink with layer count.
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            for proj in (module.q_proj, module.k_proj, module.v_proj):
                nn.init.normal_(proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, GroupViTMLP):
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
785
+
786
+
787
# --- Docstring templates consumed by the @add_start_docstrings decorators on the model classes ---

# Class-level boilerplate shared by every GroupViT model.
GROUPVIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Inputs documentation for the text-only forward passes.
GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

# Inputs documentation for the vision-only forward passes.
GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

# Inputs documentation for the combined text+vision forward pass of `GroupViTModel`.
GROUPVIT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`CLIPImageProcessor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
881
+
882
+
883
class GroupViTVisionEncoder(nn.Module):
    """Hierarchical GroupViT vision encoder: a sequence of grouping stages.

    Each [`GroupViTStage`] consumes the current patch/group features plus the group
    tokens produced by the previous stage, and emits a smaller set of output groups.
    """

    def __init__(self, config: GroupViTVisionConfig) -> None:
        super().__init__()
        self.config = config
        stages = []
        for stage_idx in range(len(config.depths)):
            stages.append(
                GroupViTStage(
                    config=config,
                    depth=config.depths[stage_idx],
                    num_group_token=config.num_group_tokens[stage_idx],
                    num_output_group=config.num_output_groups[stage_idx],
                    # The first stage has no predecessor, hence no previous group tokens.
                    num_prev_group_token=config.num_output_groups[stage_idx - 1] if stage_idx > 0 else 0,
                )
            )
        self.stages = nn.ModuleList(stages)
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutput]:
        """Run every grouping stage, optionally collecting hidden states and grouping maps."""
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        collected_hidden_states = () if output_hidden_states else None
        # Grouping attentions play the role of `attentions` in the returned output.
        collected_groupings = () if output_attentions else None
        group_tokens = None

        for stage_module in self.stages:
            if output_hidden_states:
                collected_hidden_states += (hidden_states,)

            stage_outputs = stage_module(hidden_states, group_tokens, output_attentions)
            hidden_states = stage_outputs[0]
            group_tokens = stage_outputs[1]

            # Some stages may not produce a grouping map; only keep the ones that do.
            if output_attentions and stage_outputs[2] is not None:
                collected_groupings += (stage_outputs[2],)

        if output_hidden_states:
            collected_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(
                value
                for value in (hidden_states, collected_hidden_states, collected_groupings)
                if value is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=collected_hidden_states,
            attentions=collected_groupings,
        )
939
+
940
+
941
class GroupViTTextEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
    [`GroupViTEncoderLayer`].

    Args:
        config: GroupViTTextConfig
    """

    def __init__(self, config: GroupViTTextConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Pre-computed input embeddings; useful when the caller wants full control over how
                `input_ids` are converted to vectors instead of using the model's embedding matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model, same `[0, 1]` convention as `attention_mask`.
            output_attentions (`bool`, *optional*):
                Whether or not to also return each layer's attention weights.
            output_hidden_states (`bool`, *optional*):
                Whether or not to also return every layer's hidden states.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        hidden_trace = () if output_hidden_states else None
        attention_trace = () if output_attentions else None
        hidden_states = inputs_embeds

        # Checkpointing trades compute for memory; only active during training.
        use_checkpointing = self.gradient_checkpointing and self.training
        for layer_module in self.layers:
            if output_hidden_states:
                hidden_trace += (hidden_states,)

            if use_checkpointing:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]
            if output_attentions:
                attention_trace += (layer_outputs[1],)

        if output_hidden_states:
            hidden_trace += (hidden_states,)

        if not return_dict:
            return tuple(v for v in (hidden_states, hidden_trace, attention_trace) if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=hidden_trace, attentions=attention_trace
        )
1036
+
1037
+
1038
# Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder, CLIP_TEXT->GROUPVIT_TEXT
class GroupViTTextTransformer(nn.Module):
    # Text tower: token/position embeddings -> causal transformer encoder -> final
    # LayerNorm, with CLIP-style EOS-token pooling for `pooler_output`.
    # NOTE: the "Copied from" marker above means this code must stay token-identical
    # to CLIPTextTransformer (modulo the listed renames) for the repo consistency check.
    def __init__(self, config: GroupViTTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = GroupViTTextEmbeddings(config)
        self.encoder = GroupViTTextEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        # For `pooled_output` computation
        self.eos_token_id = config.eos_token_id

    @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        # Flatten any leading batch dims to a single batch dimension.
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # CLIP's text model uses causal mask, prepare it here.
        # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        if self.eos_token_id == 2:
            # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here.
            # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
            # ------------------------------------------------------------
            # text_embeds.shape = [batch_size, sequence_length, transformer.width]
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
            ]
        else:
            # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible)
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`)
                # Note: we assume each sequence (along batch dim.) contains an `eos_token_id` (e.g. prepared by the tokenizer)
                (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
                .int()
                .argmax(dim=-1),
            ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
1133
+
1134
+
1135
class GroupViTTextModel(GroupViTPreTrainedModel):
    # Public wrapper exposing the bare text tower (`GroupViTTextTransformer`).
    config_class = GroupViTTextConfig

    def __init__(self, config: GroupViTTextConfig):
        super().__init__(config)
        self.text_model = GroupViTTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import CLIPTokenizer, GroupViTTextModel

        >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        # Pure delegation: all defaulting/validation happens inside the transformer.
        outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        return outputs
1186
+
1187
+
1188
class GroupViTVisionTransformer(nn.Module):
    # Vision tower: patch embeddings -> grouping encoder -> LayerNorm, with mean
    # pooling over the final group tokens for `pooler_output`.
    def __init__(self, config: GroupViTVisionConfig):
        super().__init__()
        self.config = config

        self.embeddings = GroupViTVisionEmbeddings(config)
        self.encoder = GroupViTVisionEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        patch_embeds = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            hidden_states=patch_embeds,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        # Normalize the final hidden states, then mean-pool across the sequence dim.
        sequence_output = self.layernorm(encoder_outputs[0])
        pooled_output = sequence_output.mean(dim=1)

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
1244
+
1245
+
1246
class GroupViTVisionModel(GroupViTPreTrainedModel):
    # Public wrapper exposing the bare vision tower (`GroupViTVisionTransformer`).
    config_class = GroupViTVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: GroupViTVisionConfig):
        super().__init__(config)
        self.vision_model = GroupViTVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> GroupViTPatchEmbeddings:
        return self.vision_model.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, GroupViTVisionModel

        >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        # Pure delegation: all defaulting/validation happens inside the transformer.
        outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        return outputs
1296
+
1297
+
1298
@add_start_docstrings(GROUPVIT_START_DOCSTRING)
class GroupViTModel(GroupViTPreTrainedModel):
    # Dual-encoder GroupViT: a text tower and a hierarchical grouping vision tower,
    # each followed by a Linear -> BatchNorm1d -> ReLU -> Linear projection head that
    # maps into a shared `projection_dim` space for contrastive image/text matching.
    config_class = GroupViTConfig

    def __init__(self, config: GroupViTConfig):
        super().__init__(config)

        if not isinstance(config.text_config, GroupViTTextConfig):
            raise ValueError(
                "config.text_config is expected to be of type GroupViTTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, GroupViTVisionConfig):
            raise ValueError(
                "config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.projection_intermediate_dim = config.projection_intermediate_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = GroupViTTextTransformer(text_config)
        self.vision_model = GroupViTVisionTransformer(vision_config)

        # Two-layer MLP projection heads (unlike CLIP's single linear projection).
        # NOTE(review): BatchNorm1d implies batch statistics are used in train mode —
        # single-sample training batches would fail; confirm intended usage.
        self.visual_projection = nn.Sequential(
            nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True),
            nn.BatchNorm1d(self.projection_intermediate_dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
        )
        self.text_projection = nn.Sequential(
            nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True),
            nn.BatchNorm1d(self.projection_intermediate_dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
        )
        # Learnable log-temperature for the contrastive logits (exponentiated in `forward`).
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`GroupViTTextModel`].

        Examples:

        ```python
        >>> from transformers import CLIPTokenizer, GroupViTModel

        >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Index 1 is `pooler_output` in both the tuple and the ModelOutput form.
        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`GroupViTVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, GroupViTModel

        >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]  # pooled_output
        image_features = self.visual_projection(pooled_output)

        return image_features

    @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=GroupViTModelOutput, config_class=GroupViTConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_segmentation: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, GroupViTModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, GroupViTModel

        >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_segmentation = (
            output_segmentation if output_segmentation is not None else self.config.output_segmentation
        )
        if output_segmentation:
            # Segmentation needs the grouping maps, which travel in the `attentions` slot.
            output_attentions = True
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        seg_logits = None
        if output_segmentation:
            # grouped features
            # [batch_size_image, num_group, hidden_size]
            image_group_embeds = vision_outputs[0]
            # [batch_size_image*num_group, hidden_size]
            image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1]))
            # Positional indexing into the encoder output tuple: with hidden states the
            # tuple is (last_hidden_state, pooled, hidden_states, attentions); without
            # them the attentions sit at index 2.
            if output_hidden_states:
                attentions = vision_outputs[3]
            else:
                attentions = vision_outputs[2]
            # [batch_size_image, num_group, height, width]
            grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])

            # normalized features
            image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True)
            # [batch_size_image x num_group, batch_size_text]
            logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale
            # [batch_size_image, batch_size_text, num_group]
            logits_per_image_group = logits_per_image_group.reshape(
                image_embeds.shape[0], -1, text_embeds.shape[0]
            ).permute(0, 2, 1)

            # [batch_size_image, batch_size_text, height x width]
            flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1)

            # [batch_size_image, batch_size_text, height, width]
            seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale
            seg_logits = seg_logits.reshape(
                seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3]
            )

        loss = None
        if return_loss:
            loss = groupvit_loss(logits_per_text)

        if not return_dict:
            if seg_logits is not None:
                output = (
                    logits_per_image,
                    logits_per_text,
                    seg_logits,
                    text_embeds,
                    image_embeds,
                    text_outputs,
                    vision_outputs,
                )
            else:
                output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return GroupViTModelOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            segmentation_logits=seg_logits,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
parrot/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py ADDED
@@ -0,0 +1,2139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 GroupViT model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import collections.abc
21
+ import math
22
+ from dataclasses import dataclass
23
+ from typing import Any, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling
30
+ from ...modeling_tf_utils import (
31
+ TFModelInputType,
32
+ TFPreTrainedModel,
33
+ get_initializer,
34
+ keras,
35
+ keras_serializable,
36
+ unpack_inputs,
37
+ )
38
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
39
+ from ...utils import (
40
+ ModelOutput,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ is_tensorflow_probability_available,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
# soft dependency
# tensorflow_probability is required for gumbel_softmax; probe it here so the module
# still imports (with a logged error) when tfp is missing or incompatible.
if is_tensorflow_probability_available():
    try:
        import tensorflow_probability as tfp

        # On the first call, check whether a compatible version of TensorFlow is installed
        # TensorFlow Probability depends on a recent stable release of TensorFlow
        _ = tfp.distributions.Normal(loc=0.0, scale=1.0)
    except ImportError:
        logger.error(
            "GroupViT models are not usable since `tensorflow_probability` can't be loaded. "
            "It seems you have `tensorflow_probability` installed with the wrong tensorflow version."
            "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability."
        )
else:
    # Even when the availability check fails, attempt the import best-effort and
    # swallow the failure silently (the error above only fires when the check passed).
    try:
        import tensorflow_probability as tfp

        # On the first call, check whether a compatible version of TensorFlow is installed
        # TensorFlow Probability depends on a recent stable release of TensorFlow
        _ = tfp.distributions.Normal(loc=0.0, scale=1.0)
    except ImportError:
        pass

_CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc"


# Additive attention-mask bias for masked positions (large negative before softmax).
LARGE_NEGATIVE = -1e8
80
+
81
+
82
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    source_length = shape_list(mask)[1]
    if tgt_len is None:
        tgt_len = source_length
    one = tf.constant(1.0)
    float_mask = tf.cast(mask, dtype=one.dtype)
    # Broadcast [bsz, src_len] -> [bsz, 1, tgt_len, src_len].
    expanded = tf.tile(float_mask[:, None, None, :], (1, 1, tgt_len, 1))

    # Attended positions -> 0.0, masked positions -> large negative additive bias.
    return (one - expanded) * LARGE_NEGATIVE
94
+
95
+
96
# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
    """Cross-entropy over the similarity matrix where the target of row i is class i."""
    batch_size = shape_list(logits)[0]
    per_sample = keras.metrics.sparse_categorical_crossentropy(
        y_true=tf.range(batch_size), y_pred=logits, from_logits=True
    )
    return tf.math.reduce_mean(per_sample)
104
+
105
+
106
# Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->groupvit
def groupvit_loss(similarity: tf.Tensor) -> tf.Tensor:
    """Symmetric contrastive loss: mean of the text->image and image->text directions."""
    text_direction = contrastive_loss(similarity)
    image_direction = contrastive_loss(tf.transpose(similarity))
    return (text_direction + image_direction) / 2.0
111
+
112
+
113
def hard_softmax(logits: tf.Tensor, dim: int) -> tf.Tensor:
    """One-hot argmax in the forward pass with softmax gradients (straight-through)."""
    soft = stable_softmax(logits, dim)
    winners = tf.argmax(soft, dim)
    # tf.one_hot only accepts -1 or a non-negative axis (e.g. rejects -2), so
    # normalize `dim` to its non-negative equivalent first.
    positive_axis = range(len(shape_list(logits)))[dim]
    hard = tf.one_hot(
        winners,
        depth=shape_list(logits)[dim],
        axis=positive_axis,
        dtype=soft.dtype,
    )
    # Straight-through estimator: forward value is `hard`, gradient flows via `soft`.
    return hard - tf.stop_gradient(soft) + soft
128
+
129
+
130
def gumbel_softmax(logits: tf.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> tf.Tensor:
    """Sample from the Gumbel-Softmax distribution, optionally discretized (straight-through)."""
    noise = tfp.distributions.Gumbel(0.0, 1.0).sample(tf.shape(logits), dtype=logits.dtype)
    soft = stable_softmax((logits + noise) / tau, dim)  # ~Gumbel(logits, tau)

    if not hard:
        # Reparametrization trick: return the differentiable soft sample.
        return soft

    # Straight through: discretize in the forward pass, keep soft gradients.
    winners = tf.argmax(soft, dim)
    # tf.one_hot only accepts -1 or a non-negative axis (e.g. rejects -2), so
    # normalize `dim` to its non-negative equivalent first.
    positive_axis = range(len(shape_list(logits)))[dim]
    hard_sample = tf.one_hot(
        winners,
        depth=shape_list(logits)[dim],
        axis=positive_axis,
        dtype=soft.dtype,
    )
    return hard_sample - tf.stop_gradient(soft) + soft
153
+
154
+
155
def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool = False) -> tf.Tensor:
    """
    Args:
        attentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
        height (`int`): height of the output attention map
        width (`int`): width of the output attention map
        align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.

    Returns:
        `tf.Tensor`: resized attention map of shape [batch_size, groups, height, width]
    """

    # Ratio between the target spatial resolution and the flattened feature length;
    # used to recover the 2D (feat_height, feat_width) grid from the flattened axis.
    scale = (height * width // attentions.shape[2]) ** 0.5
    if height > width:
        # Derive the smaller side first, then infer the other from the flat length.
        feat_width = int(np.round(width / scale))
        feat_height = shape_list(attentions)[2] // feat_width
    else:
        feat_height = int(np.round(height / scale))
        feat_width = shape_list(attentions)[2] // feat_height

    batch_size = shape_list(attentions)[0]
    groups = shape_list(attentions)[1]  # number of group token
    # [batch_size, groups, height x width, groups] -> [batch_size, groups, height, width]
    attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width))
    # NCHW -> NHWC, since tf.image.resize expects channels last.
    attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))
    if align_corners:
        # Only the legacy v1 resize op exposes `align_corners`.
        attentions = tf.compat.v1.image.resize(
            attentions,
            size=(height, width),
            method="bilinear",
            align_corners=align_corners,
        )
    else:
        attentions = tf.image.resize(attentions, size=(height, width), method="bilinear")
    # Back to NCHW.
    attentions = tf.transpose(attentions, perm=(0, 3, 1, 2))
    return attentions
191
+
192
+
193
def get_grouping_from_attentions(attentions: Tuple[tf.Tensor], hw_shape: Tuple[int]) -> tf.Tensor:
    """
    Args:
        attentions (`tuple(tf.Tensor)`: tuple of attention maps returned by `TFGroupViTVisionTransformer`
        hw_shape (`tuple(int)`): height and width of the output attention map
    Returns:
        `tf.Tensor`: the attention map of shape [batch_size, groups, height, width]
    """

    grouping_maps = []
    accumulated = None
    for stage_attention in attentions:
        # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups]
        stage_attention = tf.transpose(stage_attention, perm=(0, 2, 1))
        # Chain the per-stage assignments so each pixel maps to the final groups.
        accumulated = stage_attention if accumulated is None else tf.matmul(accumulated, stage_attention)
        # [batch_size, height x width, num_groups] -> [batch_size, num_groups, height, width]
        grouping_maps.append(resize_attention_map(tf.transpose(accumulated, perm=(0, 2, 1)), *hw_shape))

    # Only the last stage's (fully chained) grouping is returned.
    final_grouping = grouping_maps[-1]

    return tf.stop_gradient(final_grouping)
219
+
220
+
221
@dataclass
class TFGroupViTModelOutput(ModelOutput):
    """
    Output container for [`TFGroupViTModel`], bundling the contrastive logits with the
    segmentation logits and both encoders' raw outputs.

    Args:
        loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        segmentation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
            Classification scores for each pixel.

        <Tip warning={true}>

        The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
        to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the
        original image size as post-processing. You should always check your logits shape and resize as needed.

        </Tip>

    text_embeds (`tf.Tensor` of shape `(batch_size, output_dim`):
            The text embeddings obtained by applying the projection layer to the pooled output of
            [`TFGroupViTTextModel`].
        image_embeds (`tf.Tensor` of shape `(batch_size, output_dim`):
            The image embeddings obtained by applying the projection layer to the pooled output of
            [`TFGroupViTVisionModel`].
        text_model_output (`TFBaseModelOutputWithPooling`):
            The output of the [`TFGroupViTTextModel`].
        vision_model_output (`TFBaseModelOutputWithPooling`):
            The output of the [`TFGroupViTVisionModel`].
    """

    loss: tf.Tensor | None = None
    logits_per_image: tf.Tensor = None
    logits_per_text: tf.Tensor = None
    segmentation_logits: tf.Tensor = None
    text_embeds: tf.Tensor = None
    image_embeds: tf.Tensor = None
    text_model_output: TFBaseModelOutputWithPooling = None
    vision_model_output: TFBaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        # Flatten to a plain tuple; the two nested ModelOutputs are themselves
        # converted to tuples instead of being embedded as dataclasses.
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
270
+
271
+
272
class TFGroupViTCrossAttentionLayer(keras.layers.Layer):
    """Cross-attention block: the query tokens attend over the key tokens, followed by a
    residual MLP and a final post-LayerNorm."""

    def __init__(self, config: GroupViTVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.attn = TFGroupViTAttention(config, name="attn")
        self.norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm2")
        self.mlp = TFGroupViTMLP(config, name="mlp")
        self.norm_post = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post")
        self.config = config

    def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False) -> tf.Tensor:
        # Residual cross-attention, then residual MLP, then the post norm.
        hidden = query + self.attn(query, encoder_hidden_states=key)[0]
        hidden = hidden + self.mlp(self.norm2(hidden))
        return self.norm_post(hidden)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        norm_shape = [None, None, self.config.hidden_size]
        if getattr(self, "attn", None) is not None:
            with tf.name_scope(self.attn.name):
                self.attn.build(None)
        if getattr(self, "norm2", None) is not None:
            with tf.name_scope(self.norm2.name):
                self.norm2.build(norm_shape)
        if getattr(self, "mlp", None) is not None:
            with tf.name_scope(self.mlp.name):
                self.mlp.build(None)
        if getattr(self, "norm_post", None) is not None:
            with tf.name_scope(self.norm_post.name):
                self.norm_post.build(norm_shape)
304
+
305
+
306
class TFGroupViTAssignAttention(keras.layers.Layer):
    """Attention layer that assigns the key/value tokens (image tokens) to the query
    tokens (group tokens), normalizing scores over the group axis."""

    def __init__(self, config: GroupViTVisionConfig, **kwargs):
        super().__init__(**kwargs)
        # 1/sqrt(hidden_size) scaling for the dot-product scores.
        self.scale = config.hidden_size**-0.5

        self.q_proj = keras.layers.Dense(config.hidden_size, name="q_proj")
        self.k_proj = keras.layers.Dense(config.hidden_size, name="k_proj")
        self.v_proj = keras.layers.Dense(config.hidden_size, name="v_proj")
        self.proj = keras.layers.Dense(config.hidden_size, name="proj")
        self.assign_eps = config.assign_eps
        self.config = config

    def get_attn(self, attn: tf.Tensor, gumbel: bool = True, hard: bool = True, training: bool = False) -> tf.Tensor:
        """Normalize raw scores over the group axis (-2): gumbel-softmax during training,
        otherwise a (hard or plain) softmax."""
        if gumbel and training:
            attn = gumbel_softmax(attn, dim=-2, hard=hard)
        else:
            if hard:
                attn = hard_softmax(attn, dim=-2)
            else:
                attn = stable_softmax(attn, axis=-2)

        return attn

    def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False):
        # Values are the (unprojected) keys; both come from the image tokens.
        value = key
        # [batch_size, query_length, channels]
        query = self.q_proj(query)

        # [batch_size, key_length, channels]
        key = self.k_proj(key)

        # [batch_size, key_length, channels]
        value = self.v_proj(value)

        # [batch_size, query_length, key_length]
        raw_attn = tf.matmul(query, key, transpose_b=True) * self.scale

        attn = self.get_attn(raw_attn, training=training)
        # Soft attention is returned alongside for downstream grouping visualization.
        soft_attn = self.get_attn(raw_attn, training=training, gumbel=False, hard=False)

        # Renormalize over keys so each group's assignment weights sum to ~1.
        attn = attn / (tf.math.reduce_sum(attn, axis=-1, keepdims=True) + self.assign_eps)

        out = tf.matmul(attn, value)

        out = self.proj(out)

        return out, soft_attn

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "q_proj", None) is not None:
            with tf.name_scope(self.q_proj.name):
                self.q_proj.build([None, None, self.config.hidden_size])
        if getattr(self, "k_proj", None) is not None:
            with tf.name_scope(self.k_proj.name):
                self.k_proj.build([None, None, self.config.hidden_size])
        if getattr(self, "v_proj", None) is not None:
            with tf.name_scope(self.v_proj.name):
                self.v_proj.build([None, None, self.config.hidden_size])
        if getattr(self, "proj", None) is not None:
            with tf.name_scope(self.proj.name):
                self.proj.build([None, None, self.config.hidden_size])
370
+
371
+
372
class TFGroupViTTokenAssign(keras.layers.Layer):
    """Grouping block: assigns image tokens to a (smaller) set of output group tokens via
    cross-attention followed by assignment attention."""

    def __init__(self, config: GroupViTVisionConfig, num_group_token: int, num_output_group: int, **kwargs):
        super().__init__(**kwargs)
        self.num_output_group = num_output_group
        # norm on group_tokens
        self.norm_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_tokens")
        # Allow a scalar or a (tokens, channels) pair for the MLP expansion ratios.
        assign_mlp_ratio = (
            config.assign_mlp_ratio
            if isinstance(config.assign_mlp_ratio, collections.abc.Iterable)
            else (config.assign_mlp_ratio, config.assign_mlp_ratio)
        )
        tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio]
        # Mixer MLP projecting num_group_token tokens down to num_output_group tokens.
        self.mlp_inter = TFGroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group, name="mlp_inter")
        self.norm_post_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post_tokens")
        # norm on x
        self.norm_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_x")
        self.pre_assign_attn = TFGroupViTCrossAttentionLayer(config, name="pre_assign_attn")

        self.assign = TFGroupViTAssignAttention(config, name="assign")
        self.norm_new_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_new_x")
        self.mlp_channels = TFGroupViTMLP(
            config, config.hidden_size, channels_dim, config.hidden_size, name="mlp_channels"
        )
        self.config = config

    def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor:
        """
        Args:
            group_tokens (tf.Tensor): group tokens, [batch_size, num_group_tokens, channels]

        Returns:
            projected_group_tokens (tf.Tensor): [batch_size, num_output_groups, channels]
        """
        # [B, num_output_groups, C] <- [B, num_group_tokens, C]
        projected_group_tokens = self.mlp_inter(group_tokens)
        projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
        return projected_group_tokens

    def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool = False):
        """
        Args:
            image_tokens (`tf.Tensor`): image tokens, of shape [batch_size, input_length, channels]
            group_tokens (`tf.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
        """

        group_tokens = self.norm_tokens(group_tokens)
        image_tokens = self.norm_x(image_tokens)
        # [batch_size, num_output_groups, channels]
        projected_group_tokens = self.project_group_token(group_tokens)
        # Groups first attend over the image tokens, then each image token is assigned
        # to a group; the result becomes the new (shorter) token sequence.
        projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)
        new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
        new_image_tokens += projected_group_tokens

        new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))

        return new_image_tokens, attention

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "norm_tokens", None) is not None:
            with tf.name_scope(self.norm_tokens.name):
                self.norm_tokens.build([None, None, self.config.hidden_size])
        if getattr(self, "mlp_inter", None) is not None:
            with tf.name_scope(self.mlp_inter.name):
                self.mlp_inter.build(None)
        if getattr(self, "norm_post_tokens", None) is not None:
            with tf.name_scope(self.norm_post_tokens.name):
                self.norm_post_tokens.build([None, None, self.config.hidden_size])
        if getattr(self, "norm_x", None) is not None:
            with tf.name_scope(self.norm_x.name):
                self.norm_x.build([None, None, self.config.hidden_size])
        if getattr(self, "pre_assign_attn", None) is not None:
            with tf.name_scope(self.pre_assign_attn.name):
                self.pre_assign_attn.build(None)
        if getattr(self, "assign", None) is not None:
            with tf.name_scope(self.assign.name):
                self.assign.build(None)
        if getattr(self, "norm_new_x", None) is not None:
            with tf.name_scope(self.norm_new_x.name):
                self.norm_new_x.build([None, None, self.config.hidden_size])
        if getattr(self, "mlp_channels", None) is not None:
            with tf.name_scope(self.mlp_channels.name):
                self.mlp_channels.build(None)
457
+
458
+
459
# Adapted from transformers.models.vit.modeling_tf_vit.TFViTPatchEmbeddings with ViT->GroupViT
class TFGroupViTPatchEmbeddings(keras.layers.Layer):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config: GroupViTConfig, **kwargs):
        super().__init__(**kwargs)
        image_size, patch_size = config.image_size, config.patch_size
        num_channels = config.num_channels
        # hidden_size is a member as it will be required in the call method
        self.hidden_size = config.hidden_size

        # Normalize scalar sizes to (height, width) pairs.
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.num_channels = num_channels
        self.config = config

        # Non-overlapping patchification: a conv whose kernel and stride both equal the patch size.
        self.projection = keras.layers.Conv2D(
            filters=self.hidden_size,
            kernel_size=patch_size,
            strides=patch_size,
            padding="valid",
            data_format="channels_last",
            use_bias=True,
            kernel_initializer=get_initializer(self.config.initializer_range),
            bias_initializer="zeros",
            name="projection",
        )

    def call(
        self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
    ) -> tf.Tensor:
        """Patchify `pixel_values` (NCHW) into a `(batch_size, num_patches, hidden_size)` sequence."""
        batch_size, num_channels, height, width = shape_list(pixel_values)
        # Shape checks only run eagerly; in graph mode the dims may be symbolic.
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if (
            not interpolate_pos_encoding
            and tf.executing_eagerly()
            and (height != self.image_size[0] or width != self.image_size[1])
        ):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )

        # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))

        projection = self.projection(pixel_values)

        # Change the 2D spatial dimensions to a single temporal dimension.
        # shape = (batch_size, num_patches, out_channels=embed_dim)
        num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])
        # In the TFGroupViTVisionEmbeddings the embeddings from this layer will be layer normalized
        # LayerNormalization layer needs to have static last dimension (otherwise the test_keras_save_load fails with symbolic tensors)
        # This is why we have used the hidden_size in the reshape method
        embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, self.hidden_size))

        return embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "projection", None) is not None:
            with tf.name_scope(self.projection.name):
                self.projection.build([None, None, None, self.num_channels])
536
+
537
+
538
# Adapted from transformers.vit.modeling_tf_vit.TFViTEmbeddings
class TFGroupViTVisionEmbeddings(keras.layers.Layer):
    """
    Construct the position and patch embeddings.
    """

    def __init__(self, config: GroupViTVisionConfig, **kwargs):
        super().__init__(**kwargs)

        self.patch_embeddings = TFGroupViTPatchEmbeddings(config, name="patch_embeddings")
        self.dropout = keras.layers.Dropout(rate=config.dropout, name="dropout")
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
        self.config = config

    def build(self, input_shape=None):
        # Learned absolute position embeddings, one per patch.
        # NOTE(review): the weight is created before the `self.built` early-return,
        # matching the upstream implementation.
        num_patches = self.patch_embeddings.num_patches
        self.position_embeddings = self.add_weight(
            shape=(1, num_patches, self.config.hidden_size),
            initializer="zeros",
            trainable=True,
            name="position_embeddings",
        )

        if self.built:
            return
        self.built = True
        if getattr(self, "patch_embeddings", None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build(None)
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.config.hidden_size])

    def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images.

        Source:
        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
        """

        batch_size, num_patches, dim = shape_list(embeddings)
        num_positions = shape_list(self.position_embeddings)[1]

        # Fast path: resolution matches training resolution, no interpolation needed.
        if num_patches == num_positions and height == width:
            return self.position_embeddings
        patch_pos_embed = self.position_embeddings
        # Target patch-grid size for the new resolution.
        h0 = height // self.config.patch_size
        w0 = width // self.config.patch_size
        # Reshape the flat position table into its square grid, resize bicubically,
        # then flatten back to (1, h0*w0, dim).
        patch_pos_embed = tf.image.resize(
            images=tf.reshape(
                patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
            ),
            size=(h0, w0),
            method="bicubic",
        )
        patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim))
        return patch_pos_embed

    def call(
        self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
    ) -> tf.Tensor:
        """Patch-embed `pixel_values`, layer-normalize, add position embeddings, apply dropout."""
        _, _, height, width = shape_list(pixel_values)
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        embeddings = self.layernorm(embeddings)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings
618
+
619
+
620
# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->GroupViT
class TFGroupViTTextEmbeddings(keras.layers.Layer):
    """Token + learned absolute position embeddings for the text encoder."""

    def __init__(self, config: GroupViTTextConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size

        self.config = config

    def build(self, input_shape: tf.TensorShape = None):
        # Token embedding table: (vocab_size, embed_dim).
        with tf.name_scope("token_embedding"):
            self.weight = self.add_weight(
                shape=(self.config.vocab_size, self.embed_dim),
                initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
                trainable=True,
                name="weight",
            )

        # Position embedding table: (max_position_embeddings, embed_dim).
        with tf.name_scope("position_embedding"):
            self.position_embedding = self.add_weight(
                shape=(self.config.max_position_embeddings, self.embed_dim),
                initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
                trainable=True,
                name="embeddings",
            )

        super().build(input_shape)

    def call(
        self,
        input_ids: tf.Tensor = None,
        position_ids: tf.Tensor = None,
        inputs_embeds: tf.Tensor = None,
    ) -> tf.Tensor:
        """
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            # Fail loudly on out-of-vocabulary ids instead of silent gather behavior.
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if position_ids is None:
            # Default positions: 0..seq_len-1, shared across the batch.
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)

        position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
        position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
        final_embeddings = inputs_embeds + position_embeds

        return final_embeddings
677
+
678
+
679
+ class TFGroupViTStage(keras.layers.Layer):
680
+ """This corresponds to the `GroupingLayer` class in the GroupViT implementation."""
681
+
682
    def __init__(
        self,
        config: GroupViTVisionConfig,
        depth: int,
        num_prev_group_token: int,
        num_group_token: int,
        num_output_group: int,
        **kwargs,
    ):
        """One stage of the vision encoder: `depth` transformer layers plus an optional
        grouping (downsample) step that reduces the tokens to `num_output_group` groups."""
        super().__init__(**kwargs)
        self.config = config
        self.depth = depth
        self.num_group_token = num_group_token
        self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(depth)]

        # Grouping block is only present when this stage carries group tokens.
        if num_group_token > 0:
            self.downsample = TFGroupViTTokenAssign(
                config=config,
                num_group_token=num_group_token,
                num_output_group=num_output_group,
                name="downsample",
            )
        else:
            self.downsample = None

        # Projects the previous stage's group tokens onto this stage's group tokens.
        if num_prev_group_token > 0 and num_group_token > 0:
            self.group_projector = [
                keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="group_projector.0"),
                TFGroupViTMixerMLP(
                    config, num_prev_group_token, config.hidden_size // 2, num_group_token, name="group_projector.1"
                ),
            ]
        else:
            self.group_projector = None
716
+
717
    def build(self, input_shape=None):
        # Learned group tokens for this stage (created before the `built` early-return,
        # matching the upstream implementation).
        if self.num_group_token > 0:
            self.group_token = self.add_weight(
                shape=(1, self.num_group_token, self.config.hidden_size),
                initializer="zeros",
                trainable=True,
                name="group_token",
            )
        else:
            self.group_token = None

        if self.built:
            return
        self.built = True
        if getattr(self, "downsample", None) is not None:
            with tf.name_scope(self.downsample.name):
                self.downsample.build(None)
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
        if getattr(self, "group_projector", None) is not None:
            # group_projector is a [LayerNormalization, TFGroupViTMixerMLP] pair.
            with tf.name_scope(self.group_projector[0].name):
                self.group_projector[0].build([None, None, self.config.hidden_size])
            with tf.name_scope(self.group_projector[1].name):
                self.group_projector[1].build(None)
743
+
744
+ @property
745
+ def with_group_token(self):
746
+ return self.group_token is not None
747
+
748
+ def split_x(self, x: tf.Tensor) -> tf.Tensor:
749
+ if self.with_group_token:
750
+ return x[:, : -self.num_group_token], x[:, -self.num_group_token :]
751
+ else:
752
+ return x, None
753
+
754
+ def concat_x(self, x: tf.Tensor, group_token: tf.Tensor | None = None) -> tf.Tensor:
755
+ if group_token is None:
756
+ return x
757
+ return tf.concat([x, group_token], axis=1)
758
+
759
+ def call(
760
+ self,
761
+ hidden_states: tf.Tensor,
762
+ prev_group_token: tf.Tensor | None = None,
763
+ output_attentions: bool = False,
764
+ training: bool = False,
765
+ ) -> Tuple[tf.Tensor]:
766
+ """
767
+ Args:
768
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
769
+ attention_mask (`tf.Tensor`): attention mask of size
770
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
771
+ `(config.encoder_attention_heads,)`.
772
+ output_attentions (`bool`, *optional*):
773
+ Whether or not to return the grouping tensors of Grouping block.
774
+ """
775
+ if self.with_group_token:
776
+ group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1))
777
+ if self.group_projector is not None:
778
+ for layer in self.group_projector:
779
+ prev_group_token = layer(prev_group_token)
780
+ group_token = group_token + prev_group_token
781
+ else:
782
+ group_token = None
783
+
784
+ x = hidden_states
785
+
786
+ cat_x = self.concat_x(x, group_token)
787
+ for layer in self.layers:
788
+ layer_out = layer(
789
+ cat_x,
790
+ attention_mask=None,
791
+ causal_attention_mask=None,
792
+ output_attentions=None,
793
+ )
794
+ cat_x = layer_out[0]
795
+
796
+ x, group_token = self.split_x(cat_x)
797
+
798
+ attention = None
799
+ if self.downsample is not None:
800
+ x, attention = self.downsample(x, group_token)
801
+
802
+ outputs = (x, group_token)
803
+ if output_attentions:
804
+ outputs = outputs + (attention,)
805
+
806
+ return outputs
807
+
808
+
809
class TFGroupViTMLP(keras.layers.Layer):
    """Two-layer feed-forward block: Dense -> activation -> Dense.

    The hidden/intermediate/output widths default to the config values but can be
    overridden individually (the mixer MLP subclass relies on this).
    """

    def __init__(
        self,
        config: GroupViTVisionConfig,
        hidden_size: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        output_size: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.config = config
        self.activation_fn = get_tf_activation(config.hidden_act)
        # Fall back to the config-wide sizes when no explicit override is given.
        if hidden_size is None:
            hidden_size = config.hidden_size
        if intermediate_size is None:
            intermediate_size = config.intermediate_size
        if output_size is None:
            output_size = hidden_size
        self.fc1 = keras.layers.Dense(intermediate_size, name="fc1")
        self.fc2 = keras.layers.Dense(output_size, name="fc2")
        self.intermediate_size = intermediate_size
        self.hidden_size = hidden_size

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        """Project up, apply the activation, project back down."""
        projected = self.fc1(hidden_states)
        activated = self.activation_fn(projected)
        return self.fc2(activated)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build each Dense layer with its known trailing feature dimension.
        for dense, feature_dim in ((getattr(self, "fc1", None), self.hidden_size),
                                   (getattr(self, "fc2", None), self.intermediate_size)):
            if dense is not None:
                with tf.name_scope(dense.name):
                    dense.build([None, None, feature_dim])
845
+
846
+
847
class TFGroupViTMixerMLP(TFGroupViTMLP):
    """MLP applied across the token axis: transpose, run the parent MLP, transpose back."""

    def call(self, x, training: bool = False):
        # Swap the sequence and channel axes so the MLP mixes over tokens.
        transposed = tf.transpose(x, perm=(0, 2, 1))
        mixed = super().call(hidden_states=transposed)
        return tf.transpose(mixed, perm=(0, 2, 1))
851
+
852
+
853
+ # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPAttention
854
class TFGroupViTAttention(keras.layers.Layer):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GroupViTConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = self.embed_dim // self.num_attention_heads
        if self.attention_head_size * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_attention_heads})."
            )

        # CLIP-style scaled initialization: the input projections are additionally
        # scaled by the depth of the network.
        factor = config.initializer_factor
        in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor
        out_proj_std = (self.embed_dim**-0.5) * factor

        self.sqrt_att_head_size = math.sqrt(self.attention_head_size)

        self.q_proj = keras.layers.Dense(
            units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj"
        )
        self.k_proj = keras.layers.Dense(
            units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj"
        )
        self.v_proj = keras.layers.Dense(
            units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj"
        )

        self.dropout = keras.layers.Dropout(rate=config.attention_dropout)

        self.out_proj = keras.layers.Dense(
            units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj"
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores
    def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
        tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))

        # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
        return tf.transpose(tensor, perm=[0, 2, 1, 3])

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor = None,
        causal_attention_mask: tf.Tensor = None,
        output_attentions: bool = None,
        encoder_hidden_states: tf.Tensor = None,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """Input shape: Batch x Time x Channel

        When `encoder_hidden_states` is given, keys and values are computed from it
        (cross-attention); otherwise this is self-attention over `hidden_states`.
        Returns `(attention_output,)` plus the (pre-dropout) attention probabilities
        when `output_attentions` is truthy.
        """

        batch_size = shape_list(hidden_states)[0]
        is_cross_attention = encoder_hidden_states is not None

        mixed_query_layer = self.q_proj(inputs=hidden_states)
        if is_cross_attention:
            mixed_key_layer = self.k_proj(inputs=encoder_hidden_states)
            mixed_value_layer = self.v_proj(inputs=encoder_hidden_states)
        else:
            mixed_key_layer = self.k_proj(inputs=hidden_states)
            mixed_value_layer = self.v_proj(inputs=hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        # (batch size, num_heads, seq_len_q, seq_len_k)
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
        attention_scores = tf.divide(attention_scores, dk)

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function)
            attention_scores = tf.add(attention_scores, causal_attention_mask)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function)
            attention_scores = tf.add(attention_scores, attention_mask)

        # Normalize the attention scores to probabilities.
        _attention_probs = stable_softmax(logits=attention_scores, axis=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        # Fix: forward `training` explicitly so attention dropout is only active
        # during training, matching how `training` is threaded everywhere else in
        # this file instead of relying on Keras' implicit learning phase.
        attention_probs = self.dropout(inputs=_attention_probs, training=training)

        attention_output = tf.matmul(attention_probs, value_layer)
        attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])

        # (batch_size, seq_len_q, embed_dim)
        attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim))

        attention_output = self.out_proj(attention_output)
        # In TFBert, attention weights are returned after dropout.
        # However, in CLIP, they are returned before dropout.
        outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,)

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "q_proj", None) is not None:
            with tf.name_scope(self.q_proj.name):
                self.q_proj.build([None, None, self.embed_dim])
        if getattr(self, "k_proj", None) is not None:
            with tf.name_scope(self.k_proj.name):
                self.k_proj.build([None, None, self.embed_dim])
        if getattr(self, "v_proj", None) is not None:
            with tf.name_scope(self.v_proj.name):
                self.v_proj.build([None, None, self.embed_dim])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.embed_dim])
976
+
977
+
978
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPEncoderLayer with CLIP->GroupViT
979
class TFGroupViTEncoderLayer(keras.layers.Layer):
    """Pre-LayerNorm Transformer encoder layer: LN -> self-attention -> residual,
    then LN -> MLP -> residual."""

    def __init__(self, config: GroupViTConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size
        self.self_attn = TFGroupViTAttention(config, name="self_attn")
        self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
        self.mlp = TFGroupViTMLP(config, name="mlp")
        self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        causal_attention_mask: tf.Tensor,
        output_attentions: bool,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`tf.Tensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            causal_attention_mask (`tf.Tensor`): causal attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`):
                Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned
                tensors for more detail.
        """
        # Attention sub-block: normalize first (pre-LN), then attend, then add the
        # unnormalized input back as the residual.
        residual = hidden_states

        hidden_states = self.layer_norm1(inputs=hidden_states)
        attention_outputs = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            training=training,
        )
        hidden_states = attention_outputs[0]
        hidden_states = residual + hidden_states

        # MLP sub-block with the same pre-LN + residual structure.
        residual = hidden_states
        hidden_states = self.layer_norm2(inputs=hidden_states)
        hidden_states = self.mlp(hidden_states=hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,) + attention_outputs[1:]  # add attentions if we output them

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self_attn", None) is not None:
            with tf.name_scope(self.self_attn.name):
                self.self_attn.build(None)
        if getattr(self, "layer_norm1", None) is not None:
            with tf.name_scope(self.layer_norm1.name):
                self.layer_norm1.build([None, None, self.embed_dim])
        if getattr(self, "mlp", None) is not None:
            with tf.name_scope(self.mlp.name):
                self.mlp.build(None)
        if getattr(self, "layer_norm2", None) is not None:
            with tf.name_scope(self.layer_norm2.name):
                self.layer_norm2.build([None, None, self.embed_dim])
1046
+
1047
+
1048
+ # Adapted from transformers.models.clip.modeling_tf_clip.TFGroupViTTextEncoder
1049
class TFGroupViTTextEncoder(keras.layers.Layer):
    """Stack of `config.num_hidden_layers` Transformer encoder layers for the text branch."""

    def __init__(self, config: GroupViTTextConfig, **kwargs):
        super().__init__(**kwargs)

        self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]

    def call(
        self,
        hidden_states,
        attention_mask: tf.Tensor,
        causal_attention_mask: tf.Tensor,
        output_attentions: bool,
        output_hidden_states: bool,
        return_dict: bool,
        training: bool = False,
    ) -> Union[Tuple, TFBaseModelOutput]:
        """Run the encoder stack, optionally collecting per-layer hidden states and attentions.

        Returns either a `TFBaseModelOutput` or, when `return_dict` is falsy, a tuple of
        the non-None values among (last_hidden_state, hidden_states, attentions).
        """
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # The layer index is not needed, so iterate directly
        # (previously `enumerate` bound an unused `idx` local).
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)

            # NOTE(review): `training` is not forwarded to the encoder layers here,
            # leaving dropout behavior to Keras' implicit phase — confirm intended.
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                causal_attention_mask,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            # Append the final hidden state so `hidden_states` has num_layers + 1 entries.
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
1100
+
1101
+
1102
class TFGroupViTVisionEncoder(keras.layers.Layer):
    """Sequence of `TFGroupViTStage`s that progressively groups the image tokens.

    Each stage consumes the group tokens produced by the previous stage; stages with
    a grouping block additionally emit a token-to-group assignment ("grouping")
    tensor that is collected when `output_attentions` is set.
    """

    def __init__(self, config: GroupViTVisionConfig, **kwargs) -> None:
        super().__init__(**kwargs)

        stages = []
        for stage_idx in range(len(config.depths)):
            stages.append(
                TFGroupViTStage(
                    config=config,
                    depth=config.depths[stage_idx],
                    num_group_token=config.num_group_tokens[stage_idx],
                    num_output_group=config.num_output_groups[stage_idx],
                    num_prev_group_token=config.num_output_groups[stage_idx - 1] if stage_idx > 0 else 0,
                    name=f"stages_._{stage_idx}",
                )
            )
        self.stages = stages

    def call(
        self,
        hidden_states: tf.Tensor,
        output_hidden_states: bool,
        output_attentions: bool,
        return_dict: bool,
        training: bool = False,
    ) -> Union[tuple, TFBaseModelOutput]:
        collected_hidden_states = () if output_hidden_states else None
        collected_groupings = () if output_attentions else None

        group_tokens = None

        for stage in self.stages:
            if output_hidden_states:
                collected_hidden_states += (hidden_states,)

            stage_outputs = stage(hidden_states, group_tokens, output_attentions)
            hidden_states = stage_outputs[0]
            group_tokens = stage_outputs[1]

            # Only stages with a downsample block produce a grouping tensor.
            if output_attentions and stage_outputs[2] is not None:
                collected_groupings += (stage_outputs[2],)

        if output_hidden_states:
            collected_hidden_states += (hidden_states,)

        if return_dict:
            return TFBaseModelOutput(
                last_hidden_state=hidden_states,
                hidden_states=collected_hidden_states,
                attentions=collected_groupings,
            )
        return tuple(v for v in [hidden_states, collected_hidden_states, collected_groupings] if v is not None)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        for stage in getattr(self, "stages", None) or []:
            with tf.name_scope(stage.name):
                stage.build(None)
1160
+
1161
+
1162
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder
1163
class TFGroupViTTextTransformer(keras.layers.Layer):
    """Text transformer: token+position embeddings -> causally-masked encoder ->
    final LayerNorm, with a pooled output taken at the EOS position."""

    def __init__(self, config: GroupViTTextConfig, **kwargs):
        super().__init__(**kwargs)

        self.embeddings = TFGroupViTTextEmbeddings(config, name="embeddings")
        self.encoder = TFGroupViTTextEncoder(config, name="encoder")
        self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")

        # For `pooled_output` computation
        self.eos_token_id = config.eos_token_id
        self.embed_dim = config.hidden_size

    def call(
        self,
        input_ids: TFModelInputType,
        attention_mask: tf.Tensor,
        position_ids: tf.Tensor,
        output_attentions: bool,
        output_hidden_states: bool,
        return_dict: bool,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        input_shape = shape_list(input_ids)

        embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        batch_size, seq_length = input_shape
        # CLIP's text model uses causal mask, prepare it here.
        # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
        causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype)

        # check attention mask and invert
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        attention_mask = _expand_mask(attention_mask)

        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.final_layer_norm(inputs=sequence_output)

        if self.eos_token_id == 2:
            # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here.
            # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
            # ------------------------------------------------------------
            # text_embeds.shape = [batch_size, n_ctx, transformer.width]
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            pooled_output = tf.gather_nd(
                params=sequence_output,
                indices=tf.stack(
                    values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1
                ),
            )
        else:
            # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible)
            # `argmax` over the int8-cast equality mask selects the *first* occurrence
            # of the eos token in each sequence.
            pooled_output = tf.gather_nd(
                params=sequence_output,
                indices=tf.stack(
                    values=(
                        tf.range(input_shape[0], dtype=tf.int64),
                        tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1),
                    ),
                    axis=1,
                ),
            )

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32):
        """Build an additive causal mask of shape (batch, 1, seq, seq): 0 on and
        below the diagonal, -10000 strictly above it."""
        # It is possible with an unspecified sequence length for seq_length to be
        # a runtime value, which is unsupported by tf.constant. Per the TensorFlow
        # docs, tf.fill can handle runtime dynamic shapes:
        # https://www.tensorflow.org/api_docs/python/tf/fill
        diag = tf.cast(tf.fill((seq_length,), 0.0), dtype)

        # set an additive 2D attention mask with all places being masked
        to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype)

        # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked)
        # TIP: think the 2D matrix as the space of (query_seq, key_seq)
        # band_part(…, 0, -1) keeps only the upper triangle (incl. diagonal) masked...
        to_mask = tf.linalg.band_part(to_mask, 0, -1)
        # to_mask = tf.linalg.band_part(to_mask, -1, 0)
        # ...then the diagonal itself is zeroed so each token can attend to itself.
        to_mask = tf.linalg.set_diag(to_mask, diagonal=diag)

        return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "final_layer_norm", None) is not None:
            with tf.name_scope(self.final_layer_norm.name):
                self.final_layer_norm.build([None, None, self.embed_dim])
1277
+
1278
+
1279
+ # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPVisionTransformer
1280
class TFGroupViTVisionTransformer(keras.layers.Layer):
    """Vision branch: patch embeddings -> grouping encoder -> LayerNorm, with a
    mean-pooled output over the final group tokens."""

    def __init__(self, config: GroupViTVisionConfig, **kwargs):
        super().__init__(**kwargs)

        self.embeddings = TFGroupViTVisionEmbeddings(config, name="embeddings")
        self.encoder = TFGroupViTVisionEncoder(config, name="encoder")
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
        self.embed_dim = config.hidden_size

    def call(
        self,
        pixel_values: TFModelInputType,
        output_attentions: bool,
        output_hidden_states: bool,
        return_dict: bool,
        training: bool = False,
    ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
        patch_embeds = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            hidden_states=patch_embeds,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        # Normalize the final hidden state, then average over the token axis to
        # obtain a single pooled vector per image.
        last_hidden_state = self.layernorm(encoder_outputs[0])
        pooled_output = tf.math.reduce_mean(last_hidden_state, axis=1)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        for attr_name in ("embeddings", "encoder"):
            sublayer = getattr(self, attr_name, None)
            if sublayer is not None:
                with tf.name_scope(sublayer.name):
                    sublayer.build(None)
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.embed_dim])
1335
+
1336
+
1337
@keras_serializable
class TFGroupViTTextMainLayer(keras.layers.Layer):
    """Keras-serializable wrapper around `TFGroupViTTextTransformer`."""

    config_class = GroupViTTextConfig

    def __init__(self, config: GroupViTTextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.text_model = TFGroupViTTextTransformer(config, name="text_model")

    def get_input_embeddings(self) -> keras.layers.Layer:
        # The token-embedding layer lives on the inner transformer.
        return self.text_model.embeddings

    def set_input_embeddings(self, value: tf.Variable):
        embeddings = self.text_model.embeddings
        embeddings.weight = value
        embeddings.vocab_size = shape_list(value)[0]

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        # Default to attending over every token when no mask is supplied.
        if attention_mask is None:
            attention_mask = tf.fill(dims=shape_list(input_ids), value=1)

        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "text_model", None) is not None:
            with tf.name_scope(self.text_model.name):
                self.text_model.build(None)
1392
+
1393
+
1394
@keras_serializable
class TFGroupViTVisionMainLayer(keras.layers.Layer):
    """Keras-serializable wrapper around `TFGroupViTVisionTransformer`."""

    config_class = GroupViTVisionConfig

    def __init__(self, config: GroupViTVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.vision_model = TFGroupViTVisionTransformer(config, name="vision_model")

    def get_input_embeddings(self) -> keras.layers.Layer:
        # The patch-embedding layer lives on the inner transformer.
        return self.vision_model.embeddings

    @unpack_inputs
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "vision_model", None) is not None:
            with tf.name_scope(self.vision_model.name):
                self.vision_model.build(None)
1436
+
1437
+
1438
+ @keras_serializable
1439
+ # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPMainLayer
1440
+ class TFGroupViTMainLayer(keras.layers.Layer):
1441
+ config_class = GroupViTConfig
1442
+
1443
+ def __init__(self, config: GroupViTConfig, **kwargs):
1444
+ super().__init__(**kwargs)
1445
+
1446
+ if not isinstance(config.text_config, GroupViTTextConfig):
1447
+ raise ValueError(
1448
+ "config.text_config is expected to be of type GroupViTTextConfig but is of type"
1449
+ f" {type(config.text_config)}."
1450
+ )
1451
+
1452
+ if not isinstance(config.vision_config, GroupViTVisionConfig):
1453
+ raise ValueError(
1454
+ "config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
1455
+ f" {type(config.vision_config)}."
1456
+ )
1457
+
1458
+ self.config = config
1459
+
1460
+ text_config = config.text_config
1461
+ vision_config = config.vision_config
1462
+
1463
+ self.projection_dim = config.projection_dim
1464
+ self.projection_intermediate_dim = config.projection_intermediate_dim
1465
+ self.text_embed_dim = text_config.hidden_size
1466
+ self.vision_embed_dim = vision_config.hidden_size
1467
+
1468
+ self.text_model = TFGroupViTTextTransformer(text_config, name="text_model")
1469
+ self.vision_model = TFGroupViTVisionTransformer(vision_config, name="vision_model")
1470
+
1471
+ self.visual_projection = [
1472
+ keras.layers.Dense(self.projection_intermediate_dim, name="visual_projection.0"),
1473
+ keras.layers.BatchNormalization(name="visual_projection.1", momentum=0.9, epsilon=1e-5),
1474
+ keras.layers.ReLU(name="visual_projection.2"),
1475
+ keras.layers.Dense(self.projection_dim, name="visual_projection.3"),
1476
+ ]
1477
+ self.text_projection = [
1478
+ keras.layers.Dense(self.projection_intermediate_dim, name="text_projection.0"),
1479
+ keras.layers.BatchNormalization(name="text_projection.1", momentum=0.9, epsilon=1e-5),
1480
+ keras.layers.ReLU(name="text_projection.2"),
1481
+ keras.layers.Dense(self.projection_dim, name="text_projection.3"),
1482
+ ]
1483
+
1484
def build(self, input_shape=None):
    """Create this layer's own weights and recursively build its sub-layers.

    Creates the scalar ``logit_scale`` temperature weight (CLIP-style), then
    builds the text/vision towers and the two 4-layer projection stacks under
    their own name scopes so variable names match the checkpoint layout.

    Args:
        input_shape: Unused; present for Keras API compatibility.
    """
    if self.built:
        # Guard first so a repeated build() call cannot attempt to re-create
        # the logit_scale weight (the original added the weight before this
        # check, so a second call would add a duplicate variable).
        return
    self.built = True
    # Learnable temperature applied to the cosine-similarity logits.
    self.logit_scale = self.add_weight(
        shape=(1,),
        initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
        trainable=True,
        name="logit_scale",
    )
    if getattr(self, "text_model", None) is not None:
        with tf.name_scope(self.text_model.name):
            self.text_model.build(None)
    if getattr(self, "vision_model", None) is not None:
        with tf.name_scope(self.vision_model.name):
            self.vision_model.build(None)
    if getattr(self, "visual_projection", None) is not None:
        # Index 2 is a ReLU with no weights, hence only layers 0, 1 and 3 are built.
        with tf.name_scope(self.visual_projection[0].name):
            self.visual_projection[0].build([None, None, None, self.vision_embed_dim])
        with tf.name_scope(self.visual_projection[1].name):
            self.visual_projection[1].build((None, self.projection_intermediate_dim))
        with tf.name_scope(self.visual_projection[3].name):
            self.visual_projection[3].build([None, None, None, self.projection_intermediate_dim])
    if getattr(self, "text_projection", None) is not None:
        # Same layout as visual_projection: Dense -> BatchNorm -> ReLU -> Dense.
        with tf.name_scope(self.text_projection[0].name):
            self.text_projection[0].build([None, None, None, self.text_embed_dim])
        with tf.name_scope(self.text_projection[1].name):
            self.text_projection[1].build((None, self.projection_intermediate_dim))
        with tf.name_scope(self.text_projection[3].name):
            self.text_projection[3].build([None, None, None, self.projection_intermediate_dim])
1515
+
1516
@unpack_inputs
def get_text_features(
    self,
    input_ids: TFModelInputType | None = None,
    attention_mask: np.ndarray | tf.Tensor | None = None,
    position_ids: np.ndarray | tf.Tensor | None = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    training: bool = False,
) -> tf.Tensor:
    """Return the projected text embeddings for a batch of token ids.

    Runs the text tower, takes its pooled output and pushes it through the
    text projection stack (Dense -> BatchNorm -> ReLU -> Dense).
    """
    # Tokens are mandatory for the text tower.
    if input_ids is None:
        raise ValueError("You have to specify either input_ids")

    # Default to an all-ones mask (attend to every position).
    if attention_mask is None:
        attention_mask = tf.fill(dims=shape_list(input_ids), value=1)

    text_outputs = self.text_model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        training=training,
    )

    # Index 1 is the pooled output of the tower; project it step by step.
    features = text_outputs[1]
    for projection_layer in self.text_projection:
        features = projection_layer(features)

    return features
1551
+
1552
@unpack_inputs
def get_image_features(
    self,
    pixel_values: TFModelInputType | None = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    training: bool = False,
) -> tf.Tensor:
    """Return the projected image embeddings for a batch of pixel values.

    Runs the vision tower, takes its pooled output and pushes it through the
    visual projection stack (Dense -> BatchNorm -> ReLU -> Dense).
    """
    # Pixel values are mandatory for the vision tower.
    if pixel_values is None:
        raise ValueError("You have to specify pixel_values")

    vision_outputs = self.vision_model(
        pixel_values=pixel_values,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        training=training,
    )

    # Index 1 is the pooled output of the tower; project it step by step.
    features = vision_outputs[1]
    for projection_layer in self.visual_projection:
        features = projection_layer(features)

    return features
1578
+
1579
@unpack_inputs
def call(
    self,
    input_ids: TFModelInputType | None = None,
    pixel_values: TFModelInputType | None = None,
    attention_mask: np.ndarray | tf.Tensor | None = None,
    position_ids: np.ndarray | tf.Tensor | None = None,
    return_loss: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    output_segmentation: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    training: bool = False,
) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]:
    """Joint text/image forward pass producing CLIP-style similarity logits.

    Both `input_ids` and `pixel_values` are required. When
    `output_segmentation` is set, per-group attentions from the vision tower
    are additionally turned into dense segmentation logits.

    Raises:
        ValueError: if `input_ids` or `pixel_values` is missing.
    """
    if input_ids is None:
        raise ValueError("You have to specify either input_ids")
    if pixel_values is None:
        raise ValueError("You have to specify pixel_values")

    input_shape = shape_list(input_ids)

    # Default to an all-ones mask (attend to every token position).
    if attention_mask is None:
        attention_mask = tf.fill(dims=input_shape, value=1)
    if output_segmentation:
        # The segmentation path reads vision attentions, so force them on.
        output_attentions = True
    vision_outputs = self.vision_model(
        pixel_values=pixel_values,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        training=training,
    )

    text_outputs = self.text_model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        training=training,
    )

    # Pooled tower outputs (index 1), pushed through the projection stacks.
    image_embeds = vision_outputs[1]
    for layer in self.visual_projection:
        image_embeds = layer(image_embeds)

    text_embeds = text_outputs[1]
    for layer in self.text_projection:
        text_embeds = layer(text_embeds)

    # normalized features (L2 along the feature axis)
    image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True)

    # cosine similarity as logits, scaled by the learned temperature
    logit_scale = tf.math.exp(self.logit_scale)
    logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
    logits_per_image = tf.transpose(logits_per_text)

    seg_logits = None
    if output_segmentation:
        # grouped features
        # [batch_size_image, num_group, hidden_size]
        image_group_embeds = vision_outputs[0]
        # [batch_size_image*num_group, hidden_size]
        image_group_embeds = tf.reshape(image_group_embeds, shape=(-1, shape_list(image_group_embeds)[-1]))
        for layer in self.visual_projection:
            image_group_embeds = layer(image_group_embeds)
        # Attentions sit one slot later in the tuple when hidden states were
        # also requested (tuple layout depends on the requested outputs).
        if output_hidden_states:
            attentions = vision_outputs[3]
        else:
            attentions = vision_outputs[2]
        # [batch_size_image, num_group, height, width]
        grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])

        # normalized features
        image_group_embeds = image_group_embeds / tf.norm(
            tensor=image_group_embeds, ord="euclidean", axis=-1, keepdims=True
        )
        # [batch_size_image x num_group, batch_size_text]
        logits_per_image_group = tf.matmul(image_group_embeds, text_embeds, transpose_b=True) * logit_scale
        # [batch_size_image, batch_size_text, num_group]
        logits_per_image_group = tf.reshape(
            logits_per_image_group, shape=(image_embeds.shape[0], -1, text_embeds.shape[0])
        )
        logits_per_image_group = tf.transpose(logits_per_image_group, perm=(0, 2, 1))

        # [batch_size_image, batch_size_text, height x width]
        flatten_grouping = tf.reshape(grouping, shape=(shape_list(grouping)[0], shape_list(grouping)[1], -1))

        # [batch_size_image, batch_size_text, height, width]
        seg_logits = tf.matmul(logits_per_image_group, flatten_grouping) * logit_scale
        seg_logits = tf.reshape(
            seg_logits, shape=(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3])
        )

    loss = None
    if return_loss:
        # [None, ...] adds a leading axis so the loss is shaped (1,).
        loss = groupvit_loss(logits_per_text)[None, ...]

    if not return_dict:
        # Tuple layout mirrors TFGroupViTModelOutput field order; seg_logits
        # is inserted only when segmentation was computed.
        if seg_logits is not None:
            output = (
                logits_per_image,
                logits_per_text,
                seg_logits,
                text_embeds,
                image_embeds,
                text_outputs,
                vision_outputs,
            )
        else:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
        return ((loss,) + output) if loss is not None else output

    return TFGroupViTModelOutput(
        loss=loss,
        logits_per_image=logits_per_image,
        logits_per_text=logits_per_text,
        segmentation_logits=seg_logits,
        text_embeds=text_embeds,
        image_embeds=image_embeds,
        text_model_output=text_outputs,
        vision_model_output=vision_outputs,
    )
1705
+
1706
+
1707
class TFGroupViTPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Configuration class used when loading/saving this model family.
    config_class = GroupViTConfig
    # Attribute name under which the main layer is stored on subclasses.
    base_model_prefix = "groupvit"
1715
+
1716
+
1717
+ GROUPVIT_START_DOCSTRING = r"""
1718
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1719
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
1720
+ etc.)
1721
+
1722
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1723
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1724
+ behavior.
1725
+
1726
+ <Tip>
1727
+
1728
+ TF 2.0 models accepts two formats as inputs:
1729
+
1730
+ - having all inputs as keyword arguments (like PyTorch models), or
1731
+ - having all inputs as a list, tuple or dict in the first positional arguments.
1732
+
1733
+ This second option is useful when using [`keras.Model.fit`] method which currently requires having all the
1734
+ tensors in the first argument of the model call function: `model(inputs)`.
1735
+
1736
+ If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
1737
+ first positional argument :
1738
+
1739
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1740
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1741
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1742
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1743
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1744
+
1745
+ </Tip>
1746
+
1747
+ Args:
1748
+ config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.
1749
+ Initializing with a config file does not load the weights associated with the model, only the
1750
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1751
+ """
1752
+
1753
+ GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
1754
+ Args:
1755
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
1756
+ Indices of input sequence tokens in the vocabulary.
1757
+
1758
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1759
+ [`PreTrainedTokenizer.encode`] for details.
1760
+
1761
+ [What are input IDs?](../glossary#input-ids)
1762
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1763
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1764
+
1765
+ - 1 for tokens that are **not masked**,
1766
+ - 0 for tokens that are **masked**.
1767
+
1768
+ [What are attention masks?](../glossary#attention-mask)
1769
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1770
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1771
+ config.max_position_embeddings - 1]`.
1772
+
1773
+ [What are position IDs?](../glossary#position-ids)
1774
+ output_attentions (`bool`, *optional*):
1775
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1776
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1777
+ config will be used instead.
1778
+ output_hidden_states (`bool`, *optional*):
1779
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1780
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1781
+ used instead.
1782
+ return_dict (`bool`, *optional*):
1783
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1784
+ eager mode, in graph mode the value will always be set to True.
1785
+ training (`bool`, *optional*, defaults to `False`):
1786
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1787
+ behaviors between training and evaluation).
1788
+ """
1789
+
1790
+ GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
1791
+ Args:
1792
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
1793
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
1794
+ [`CLIPImageProcessor.__call__`] for details.
1795
+ output_attentions (`bool`, *optional*):
1796
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1797
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1798
+ config will be used instead.
1799
+ output_hidden_states (`bool`, *optional*):
1800
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1801
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1802
+ used instead.
1803
+ return_dict (`bool`, *optional*):
1804
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1805
+ eager mode, in graph mode the value will always be set to True.
1806
+ training (`bool`, *optional*, defaults to `False`):
1807
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1808
+ behaviors between training and evaluation).
1809
+ """
1810
+
1811
+ GROUPVIT_INPUTS_DOCSTRING = r"""
1812
+ Args:
1813
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
1814
+ Indices of input sequence tokens in the vocabulary.
1815
+
1816
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1817
+ [`PreTrainedTokenizer.encode`] for details.
1818
+
1819
+ [What are input IDs?](../glossary#input-ids)
1820
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
1821
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
1822
+ [`CLIPImageProcessor.__call__`] for details.
1823
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1824
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1825
+
1826
+ - 1 for tokens that are **not masked**,
1827
+ - 0 for tokens that are **masked**.
1828
+
1829
+ [What are attention masks?](../glossary#attention-mask)
1830
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1831
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1832
+ config.max_position_embeddings - 1]`.
1833
+
1834
+ [What are position IDs?](../glossary#position-ids)
1835
+ return_loss (`bool`, *optional*):
1836
+ Whether or not to return the contrastive loss.
1837
+ output_attentions (`bool`, *optional*):
1838
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1839
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1840
+ config will be used instead.
1841
+ output_hidden_states (`bool`, *optional*):
1842
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1843
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1844
+ used instead.
1845
+ return_dict (`bool`, *optional*):
1846
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1847
+ eager mode, in graph mode the value will always be set to True.
1848
+ training (`bool`, *optional*, defaults to `False`):
1849
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1850
+ behaviors between training and evaluation).
1851
+ """
1852
+
1853
+
1854
class TFGroupViTTextModel(TFGroupViTPreTrainedModel):
    """Standalone GroupViT text tower (thin wrapper around the text main layer)."""

    config_class = GroupViTTextConfig
    main_input_name = "input_ids"

    def __init__(self, config: GroupViTTextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        # All real work is delegated to the main layer.
        self.groupvit = TFGroupViTTextMainLayer(config, name="groupvit")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTTextConfig)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import CLIPTokenizer, TFGroupViTTextModel

        >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> model = TFGroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""

        # Pure pass-through to the main layer.
        outputs = self.groupvit(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        # Standard idempotent TF build: delegate to the main layer once.
        if self.built:
            return
        self.built = True
        if getattr(self, "groupvit", None) is not None:
            with tf.name_scope(self.groupvit.name):
                self.groupvit.build(None)
1913
+
1914
+
1915
class TFGroupViTVisionModel(TFGroupViTPreTrainedModel):
    """Standalone GroupViT vision tower (thin wrapper around the vision main layer)."""

    config_class = GroupViTVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: GroupViTVisionConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        # All real work is delegated to the main layer.
        self.groupvit = TFGroupViTVisionMainLayer(config, name="groupvit")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, TFGroupViTVisionModel

        >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> model = TFGroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="tf")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""

        # Pure pass-through to the main layer.
        outputs = self.groupvit(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        # Standard idempotent TF build: delegate to the main layer once.
        if self.built:
            return
        self.built = True
        if getattr(self, "groupvit", None) is not None:
            with tf.name_scope(self.groupvit.name):
                self.groupvit.build(None)
1975
+
1976
+
1977
@add_start_docstrings(GROUPVIT_START_DOCSTRING)
class TFGroupViTModel(TFGroupViTPreTrainedModel):
    """Full dual-encoder GroupViT model (thin wrapper around the combined main layer)."""

    config_class = GroupViTConfig

    def __init__(self, config: GroupViTConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        # All real work is delegated to the combined main layer.
        self.groupvit = TFGroupViTMainLayer(config, name="groupvit")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def get_text_features(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> tf.Tensor:
        r"""
        Returns:
            text_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying
            the projection layer to the pooled output of [`TFGroupViTTextModel`].

        Examples:

        ```python
        >>> from transformers import CLIPTokenizer, TFGroupViTModel

        >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
        >>> text_features = model.get_text_features(**inputs)
        ```"""

        # Delegate to the main layer's text-feature extraction.
        text_features = self.groupvit.get_text_features(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return text_features

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: TFModelInputType | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> tf.Tensor:
        r"""
        Returns:
            image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying
            the projection layer to the pooled output of [`TFGroupViTVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, TFGroupViTModel

        >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="tf")

        >>> image_features = model.get_image_features(**inputs)
        ```"""

        # Delegate to the main layer's image-feature extraction.
        image_features = self.groupvit.get_image_features(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return image_features

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFGroupViTModelOutput, config_class=GroupViTConfig)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        pixel_values: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_segmentation: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, TFGroupViTModel
        >>> import tensorflow as tf

        >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
        >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = tf.math.softmax(logits_per_image, axis=1)  # we can take the softmax to get the label probabilities
        ```"""

        # Delegate the full joint forward pass to the main layer.
        outputs = self.groupvit(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            return_loss=return_loss,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_segmentation=output_segmentation,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def serving_output(self, output: TFGroupViTModelOutput) -> TFGroupViTModelOutput:
        # TODO: As is this currently fails with saved_model=True, because
        # TensorFlow cannot trace through nested dataclasses. Reference:
        # https://github.com/huggingface/transformers/pull/16886
        return output

    def build(self, input_shape=None):
        # Standard idempotent TF build: delegate to the main layer once.
        if self.built:
            return
        self.built = True
        if getattr(self, "groupvit", None) is not None:
            with tf.name_scope(self.groupvit.name):
                self.groupvit.build(None)
parrot/lib/python3.10/site-packages/transformers/models/idefics2/__init__.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps submodule name -> list of public symbols.
# The configuration is always importable; vision/torch pieces are optional.
_import_structure = {"configuration_idefics2": ["Idefics2Config"]}


try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision deps missing: the image processor simply isn't exported.
    pass
else:
    _import_structure["image_processing_idefics2"] = ["Idefics2ImageProcessor"]


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: modeling and processing classes aren't exported.
    pass
else:
    _import_structure["modeling_idefics2"] = [
        "Idefics2ForConditionalGeneration",
        "Idefics2PreTrainedModel",
        "Idefics2Model",
    ]
    _import_structure["processing_idefics2"] = ["Idefics2Processor"]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime uses _LazyModule.
    from .configuration_idefics2 import Idefics2Config

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_idefics2 import Idefics2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_idefics2 import (
            Idefics2ForConditionalGeneration,
            Idefics2Model,
            Idefics2PreTrainedModel,
        )
        from .processing_idefics2 import Idefics2Processor


else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.11 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/configuration_idefics2.cpython-310.pyc ADDED
Binary file (9.98 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/convert_idefics2_weights_to_hf.cpython-310.pyc ADDED
Binary file (4.34 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/image_processing_idefics2.cpython-310.pyc ADDED
Binary file (22.7 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/modeling_idefics2.cpython-310.pyc ADDED
Binary file (63 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/processing_idefics2.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/idefics2/configuration_idefics2.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Idefics2 model configuration"""
15
+
16
+ import os
17
+ from typing import Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+ from ..auto import CONFIG_MAPPING
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class Idefics2VisionConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`Idefics2VisionModel`]. It is used to instantiate a
30
+ Idefics2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
32
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics2 model
33
+ [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b).
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ hidden_size (`int`, *optional*, defaults to 768):
40
+ Dimensionality of the encoder layers and the pooler layer.
41
+ intermediate_size (`int`, *optional*, defaults to 3072):
42
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
43
+ num_hidden_layers (`int`, *optional*, defaults to 12):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 12):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ num_channels (`int`, *optional*, defaults to 3):
48
+ Number of channels in the input images.
49
+ image_size (`int`, *optional*, defaults to 224):
50
+ The size (resolution) of each image.
51
+ patch_size (`int`, *optional*, defaults to 32):
52
+ The size (resolution) of each patch.
53
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
56
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
57
+ The epsilon used by the layer normalization layers.
58
+ attention_dropout (`float`, *optional*, defaults to 0.0):
59
+ The dropout ratio for the attention probabilities.
60
+ intializer_range (`float`, *optional*, defaults to 0.02):
61
+ The standard deviation for initializing all weight matrices in the model.
62
+
63
+ Example:
64
+
65
+ ```python
66
+ >>> from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
67
+ >>> from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
68
+
69
+ >>> # Initializing a Idefics2VisionConfig with google/siglip-base-patch16-224 style configuration
70
+ >>> configuration = Idefics2VisionConfig()
71
+
72
+ >>> # Initializing a Idefics2VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
73
+ >>> model = Idefics2VisionTransformer(configuration)
74
+
75
+ >>> # Accessing the model configuration
76
+ >>> configuration = model.config
77
+ ```"""
78
+
79
+ model_type = "idefics2"
80
+
81
+ def __init__(
82
+ self,
83
+ hidden_size=768,
84
+ intermediate_size=3072,
85
+ num_hidden_layers=12,
86
+ num_attention_heads=12,
87
+ num_channels=3,
88
+ image_size=224,
89
+ patch_size=32,
90
+ hidden_act="gelu_pytorch_tanh",
91
+ layer_norm_eps=1e-6,
92
+ attention_dropout=0.0,
93
+ initializer_range=0.02,
94
+ **kwargs,
95
+ ):
96
+ super().__init__(**kwargs)
97
+
98
+ self.hidden_size = hidden_size
99
+ self.intermediate_size = intermediate_size
100
+ self.num_hidden_layers = num_hidden_layers
101
+ self.num_attention_heads = num_attention_heads
102
+ self.num_channels = num_channels
103
+ self.patch_size = patch_size
104
+ self.image_size = image_size
105
+ self.attention_dropout = attention_dropout
106
+ self.layer_norm_eps = layer_norm_eps
107
+ self.hidden_act = hidden_act
108
+ self.initializer_range = initializer_range
109
+
110
+ @classmethod
111
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
112
+ cls._set_token_in_kwargs(kwargs)
113
+
114
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
115
+
116
+ # get the vision config dict if we are loading from Idefics2Config
117
+ if config_dict.get("model_type") == "idefics2":
118
+ config_dict = config_dict["vision_config"]
119
+
120
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
121
+ logger.warning(
122
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
123
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
124
+ )
125
+
126
+ return cls.from_dict(config_dict, **kwargs)
127
+
128
+
129
+ class Idefics2PerceiverConfig(PretrainedConfig):
130
+ r"""
131
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
132
+ documentation from [`PretrainedConfig`] for more information.
133
+
134
+ Args:
135
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
136
+ The non-linear activation function (function or string) in the perceiver block.
137
+ resampler_n_latents (`int`, *optional*, defaults to 64):
138
+ Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
139
+ resampler_depth (`int`, *optional*, defaults to 3):
140
+ Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (<= 3).
141
+ resampler_n_heads (`int`, *optional*, defaults to 16):
142
+ Number of heads in each Transformer block (for multi-headed self-attention).
143
+ resampler_head_dim (`int`, *optional*, defaults to 96):
144
+ Dimensionality of each head projection in the Transformer block.
145
+ num_key_value_heads (`int`, *optional*, defaults to 4):
146
+ Number of key-value heads in the perceiver attention block.
147
+ attention_dropout (`float`, *optional*, defaults to 0.0):
148
+ The dropout ratio for the attention probabilities.
149
+ """
150
+
151
+ model_type = "idefics2"
152
+
153
+ def __init__(
154
+ self,
155
+ hidden_act="silu",
156
+ resampler_n_latents=64,
157
+ resampler_depth=3,
158
+ resampler_n_heads=16,
159
+ resampler_head_dim=96,
160
+ num_key_value_heads=4,
161
+ attention_dropout=0.0,
162
+ **kwargs,
163
+ ):
164
+ self.hidden_act = hidden_act
165
+ self.resampler_n_latents = resampler_n_latents
166
+ self.resampler_depth = resampler_depth
167
+ self.resampler_n_heads = resampler_n_heads
168
+ self.num_key_value_heads = num_key_value_heads
169
+ self.resampler_head_dim = resampler_head_dim
170
+ self.attention_dropout = attention_dropout
171
+ if self.num_key_value_heads > self.resampler_n_heads:
172
+ raise ValueError(
173
+ f"num_key_value_heads={self.num_key_value_heads} must be less than or equal to"
174
+ f" resampler_n_heads={self.resampler_n_heads}"
175
+ )
176
+ super().__init__(**kwargs)
177
+
178
+
179
+ class Idefics2Config(PretrainedConfig):
180
+ r"""
181
+ This is the configuration class to store the configuration of a [`Idefics2Model`]. It is used to instantiate a
182
+ Idefics2 model according to the specified arguments, defining the model architecture. Instantiating a
183
+ configuration with the defaults will yield a similar configuration to that of the model of the Idefics2
184
+ [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b) architecture.
185
+
186
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
187
+ documentation from [`PretrainedConfig`] for more information.
188
+
189
+ Args:
190
+ use_cache (`bool`, *optional*, defaults to `True`):
191
+ Whether or not the model should cache the key/value pairs of the attention mechanism.
192
+ image_token_id (`int`, *optional*, defaults to 32001):
193
+ The id of the "image" token.
194
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
195
+ Whether or not to tie the word embeddings with the token embeddings.
196
+ vision_config (`IdeficsVisionConfig` or `dict`, *optional*):
197
+ Custom vision config or dict
198
+ perceiver_config (`IdeficsPerceiverConfig` or `dict`, *optional*):
199
+ Custom perceiver config or dict
200
+ text_config (`MistralConfig` or `dict`, *optional*):
201
+ Custom text config or dict for the text model
202
+
203
+ Example:
204
+ ```python
205
+ >>> from transformers import Idefics2Model, Idefics2Config
206
+ >>> # Initializing configuration
207
+ >>> configuration = Idefics2Config()
208
+ >>> # Initializing a model from the configuration
209
+ >>> model = Idefics2Model(configuration)
210
+ >>> # Accessing the model configuration
211
+ >>> configuration = model.config
212
+ ```"""
213
+
214
+ model_type = "idefics2"
215
+ is_composition = True
216
+
217
+ def __init__(
218
+ self,
219
+ use_cache=True,
220
+ image_token_id=32_001,
221
+ tie_word_embeddings=False,
222
+ vision_config=None,
223
+ perceiver_config=None,
224
+ text_config=None,
225
+ **kwargs,
226
+ ):
227
+ self.image_token_id = image_token_id
228
+ self.use_cache = use_cache
229
+ self.tie_word_embeddings = tie_word_embeddings
230
+
231
+ if perceiver_config is None:
232
+ self.perceiver_config = Idefics2PerceiverConfig()
233
+ logger.info("perciver_config is None, using default perceiver config")
234
+ elif isinstance(perceiver_config, dict):
235
+ self.perceiver_config = Idefics2PerceiverConfig(**perceiver_config)
236
+ elif isinstance(perceiver_config, Idefics2PerceiverConfig):
237
+ self.perceiver_config = perceiver_config
238
+
239
+ if vision_config is None:
240
+ self.vision_config = Idefics2VisionConfig()
241
+ logger.info("vision_config is None, using default vision config")
242
+ elif isinstance(vision_config, dict):
243
+ self.vision_config = Idefics2VisionConfig(**vision_config)
244
+ elif isinstance(vision_config, Idefics2VisionConfig):
245
+ self.vision_config = vision_config
246
+
247
+ if isinstance(text_config, dict):
248
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "mistral"
249
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
250
+ elif text_config is None:
251
+ logger.info("text_config is None, using default text config")
252
+ text_config = CONFIG_MAPPING["mistral"](
253
+ max_position_embeddings=4096 * 8,
254
+ rms_norm_eps=1e-5,
255
+ # None in the original configuration_mistral, we set it to the unk_token_id
256
+ pad_token_id=0,
257
+ tie_word_embeddings=False,
258
+ )
259
+
260
+ self.text_config = text_config
261
+
262
+ super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings)
parrot/lib/python3.10/site-packages/transformers/models/idefics2/convert_idefics2_weights_to_hf.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import copy
17
+
18
+ import torch
19
+ from accelerate import init_empty_weights
20
+
21
+ from transformers import (
22
+ AutoConfig,
23
+ AutoModelForCausalLM,
24
+ AutoTokenizer,
25
+ Idefics2Config,
26
+ Idefics2ForConditionalGeneration,
27
+ Idefics2ImageProcessor,
28
+ Idefics2Processor,
29
+ MistralConfig,
30
+ )
31
+
32
+
33
+ EPILOG_TXT = """Example:
34
+ python transformers/src/transformers/models/idefics2/convert_idefics2_weights_to_hf.py --original_model_id HuggingFaceM4/idefics2-8b --output_hub_path org/idefics2
35
+ """
36
+
37
+
38
+ KEYS_TO_MODIFY_MAPPING = {
39
+ "lm_head.weight": "lm_head.linear.weight",
40
+ "model.layers": "model.text_model.layers",
41
+ "model.norm": "model.text_model.norm",
42
+ "model.perceiver_resampler": "model.connector.perceiver_resampler",
43
+ "model.modality_projection": "model.connector.modality_projection",
44
+ }
45
+
46
+
47
+ WEIGHTS_TO_MERGE_MAPPING = (
48
+ # (weights to merge in merging order), (new weight name)
49
+ (
50
+ ("model.embed_tokens.weight", "model.embed_tokens.additional_embedding.weight"),
51
+ "model.text_model.embed_tokens.weight",
52
+ ),
53
+ (("lm_head.linear.weight", "additional_fc.weight"), "lm_head.weight"),
54
+ )
55
+
56
+
57
+ def convert_state_dict_to_hf(state_dict):
58
+ new_state_dict = {}
59
+ for key, value in state_dict.items():
60
+ if key.endswith(".inv_freq"):
61
+ continue
62
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
63
+ if key_to_modify in key:
64
+ key = key.replace(key_to_modify, new_key)
65
+
66
+ new_state_dict[key] = value
67
+ return new_state_dict
68
+
69
+
70
+ def merge_weights(state_dict):
71
+ new_state_dict = copy.deepcopy(state_dict)
72
+
73
+ # Merge the weights
74
+ for weights_to_merge, new_weight_name in WEIGHTS_TO_MERGE_MAPPING:
75
+ for weight in weights_to_merge:
76
+ assert weight in state_dict, f"Weight {weight} is missing in the state dict"
77
+ if new_weight_name not in new_state_dict:
78
+ new_state_dict[new_weight_name] = [state_dict[weight]]
79
+ else:
80
+ new_state_dict[new_weight_name].append(state_dict[weight])
81
+ new_state_dict[new_weight_name] = torch.cat(new_state_dict[new_weight_name], dim=0)
82
+
83
+ # Remove the weights that were merged
84
+ for weights_to_merge, new_weight_name in WEIGHTS_TO_MERGE_MAPPING:
85
+ for weight in weights_to_merge:
86
+ if weight in new_state_dict and weight != new_weight_name:
87
+ new_state_dict.pop(weight)
88
+
89
+ return new_state_dict
90
+
91
+
92
+ def get_config(checkpoint):
93
+ if checkpoint == "HuggingFaceM4/idefics2":
94
+ # We load the config then recreate to use the text_config
95
+ config = AutoConfig.from_pretrained(checkpoint)
96
+ text_config = MistralConfig(
97
+ vocab_size=config.vocab_size + config.additional_vocab_size,
98
+ hidden_size=config.hidden_size,
99
+ intermediate_size=config.intermediate_size,
100
+ num_hidden_layers=config.num_hidden_layers,
101
+ num_attention_heads=config.num_attention_heads,
102
+ num_key_value_heads=config.num_key_value_heads,
103
+ hidden_act=config.hidden_act,
104
+ max_position_embeddings=config.max_position_embeddings,
105
+ initializer_range=config.initializer_range,
106
+ rms_norm_eps=config.rms_norm_eps,
107
+ tie_word_embeddings=config.tie_word_embeddings,
108
+ rope_theta=config.rope_theta,
109
+ sliding_window=config.sliding_window,
110
+ attention_dropout=config.attention_dropout,
111
+ pad_token_id=config.pad_token_id,
112
+ bos_token_id=config.bos_token_id,
113
+ eos_token_id=config.eos_token_id,
114
+ )
115
+ perceiver_config = config.perceiver_config.to_dict()
116
+ config = Idefics2Config(
117
+ text_config=text_config.to_dict(),
118
+ vision_config=config.vision_config,
119
+ perceiver_config=perceiver_config,
120
+ use_cache=config.use_cache,
121
+ image_token_id=config.image_token_id,
122
+ tie_word_embeddings=config.tie_word_embeddings,
123
+ )
124
+ return config
125
+
126
+ return AutoConfig.from_pretrained(checkpoint)
127
+
128
+
129
+ def convert_idefics2_hub_to_hf(original_model_id, output_hub_path, push_to_hub):
130
+ # The original model maps to AutoModelForCausalLM, converted we map to Idefics2ForConditionalGeneration
131
+ original_model = AutoModelForCausalLM.from_pretrained(original_model_id, trust_remote_code=True)
132
+ # The original model doesn't use the idefics2 processing objects
133
+ image_seq_len = original_model.config.perceiver_config.resampler_n_latents
134
+ image_processor = Idefics2ImageProcessor()
135
+ tokenizer = AutoTokenizer.from_pretrained(original_model_id)
136
+ processor = Idefics2Processor(
137
+ image_processor=image_processor,
138
+ tokenizer=tokenizer,
139
+ image_seq_len=image_seq_len,
140
+ )
141
+ state_dict = original_model.state_dict()
142
+ state_dict = convert_state_dict_to_hf(state_dict)
143
+
144
+ # Merge weights
145
+ state_dict = merge_weights(state_dict)
146
+
147
+ config = get_config(original_model_id)
148
+
149
+ with init_empty_weights():
150
+ model = Idefics2ForConditionalGeneration(config)
151
+
152
+ model.load_state_dict(state_dict, strict=True, assign=True)
153
+
154
+ model.save_pretrained(output_hub_path)
155
+ processor.save_pretrained(output_hub_path)
156
+
157
+ if push_to_hub:
158
+ model.push_to_hub(output_hub_path, private=True)
159
+ processor.push_to_hub(output_hub_path, private=True)
160
+
161
+
162
+ def main():
163
+ parser = argparse.ArgumentParser(
164
+ epilog=EPILOG_TXT,
165
+ formatter_class=argparse.RawDescriptionHelpFormatter,
166
+ )
167
+ parser.add_argument(
168
+ "--original_model_id",
169
+ help="Hub location of the text model",
170
+ )
171
+ parser.add_argument(
172
+ "--output_hub_path",
173
+ help="Location on the hub of the converted model",
174
+ )
175
+ parser.add_argument(
176
+ "--push_to_hub",
177
+ action="store_true",
178
+ help="If set, the model will be pushed to the hub after conversion.",
179
+ )
180
+ args = parser.parse_args()
181
+ convert_idefics2_hub_to_hf(args.original_model_id, args.output_hub_path, args.push_to_hub)
182
+
183
+
184
+ if __name__ == "__main__":
185
+ main()
parrot/lib/python3.10/site-packages/transformers/models/idefics2/image_processing_idefics2.py ADDED
@@ -0,0 +1,596 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
22
+ from ...image_transforms import PaddingMode, pad, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ IMAGENET_STANDARD_MEAN,
25
+ IMAGENET_STANDARD_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ get_image_size,
30
+ infer_channel_dimension_format,
31
+ is_scaled_image,
32
+ is_valid_image,
33
+ to_numpy_array,
34
+ valid_images,
35
+ validate_preprocess_arguments,
36
+ )
37
+ from ...utils import TensorType, is_vision_available, logging
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ if is_vision_available():
44
+ import PIL
45
+ from PIL import Image
46
+
47
+
48
+ def get_resize_output_image_size(image, size, input_data_format) -> Tuple[int, int]:
49
+ """
50
+ Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
51
+
52
+ Args:
53
+ image (`np.ndarray`):
54
+ Image to resize.
55
+ size (`Dict[str, int]`):
56
+ Size of the output image containing the keys "shortest_edge" and "longest_edge".
57
+ input_data_format (`ChannelDimension` or `str`):
58
+ The channel dimension format of the input image.
59
+
60
+ Returns:
61
+ The output size of the image after resizing.
62
+ """
63
+ height, width = get_image_size(image, channel_dim=input_data_format)
64
+
65
+ min_len = size["shortest_edge"]
66
+ max_len = size["longest_edge"]
67
+ aspect_ratio = width / height
68
+
69
+ if width >= height and width > max_len:
70
+ width = max_len
71
+ height = int(width / aspect_ratio)
72
+ elif height > width and height > max_len:
73
+ height = max_len
74
+ width = int(height * aspect_ratio)
75
+ height = max(height, min_len)
76
+ width = max(width, min_len)
77
+ return height, width
78
+
79
+
80
+ def make_list_of_images(images: ImageInput) -> List[List[np.ndarray]]:
81
+ """
82
+ Convert a single image or a list of images to a list of numpy arrays.
83
+
84
+ Args:
85
+ images (`ImageInput`):
86
+ A single image or a list of images.
87
+
88
+ Returns:
89
+ A list of numpy arrays.
90
+ """
91
+ # If it's a single image, convert it to a list of lists
92
+ if is_valid_image(images):
93
+ images = [[images]]
94
+ # If it's a list of images, it's a single batch, so convert it to a list of lists
95
+ elif isinstance(images, (list, tuple)) and len(images) > 0 and is_valid_image(images[0]):
96
+ images = [images]
97
+ # If it's a list of batches, it's already in the right format
98
+ elif (
99
+ isinstance(images, (list, tuple))
100
+ and len(images) > 0
101
+ and isinstance(images[0], (list, tuple))
102
+ and is_valid_image(images[0][0])
103
+ ):
104
+ pass
105
+ else:
106
+ raise ValueError(
107
+ "Invalid input type. Must be a single image, a list of images, or a list of batches of images."
108
+ )
109
+ return images
110
+
111
+
112
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
113
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
114
+ """
115
+ Return the maximum value across all indices of an iterable of values.
116
+ """
117
+ return [max(values_i) for values_i in zip(*values)]
118
+
119
+
120
+ def get_max_height_width(
121
+ images_list: List[List[np.ndarray]], input_data_format: Optional[Union[str, ChannelDimension]] = None
122
+ ) -> List[int]:
123
+ """
124
+ Get the maximum height and width across all images in a batch.
125
+ """
126
+ if input_data_format is None:
127
+ input_data_format = infer_channel_dimension_format(images_list[0][0])
128
+
129
+ image_sizes = []
130
+ for images in images_list:
131
+ for image in images:
132
+ image_sizes.append(get_image_size(image, channel_dim=input_data_format))
133
+
134
+ max_height, max_width = max_across_indices(image_sizes)
135
+ return (max_height, max_width)
136
+
137
+
138
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
139
+ def make_pixel_mask(
140
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
141
+ ) -> np.ndarray:
142
+ """
143
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
144
+
145
+ Args:
146
+ image (`np.ndarray`):
147
+ Image to make the pixel mask for.
148
+ output_size (`Tuple[int, int]`):
149
+ Output size of the mask.
150
+ """
151
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
152
+ mask = np.zeros(output_size, dtype=np.int64)
153
+ mask[:input_height, :input_width] = 1
154
+ return mask
155
+
156
+
157
+ # FIXME Amy: merge this function with the one in image_transforms.py
158
+ def convert_to_rgb(image: ImageInput) -> ImageInput:
159
+ """
160
+ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
161
+ as is.
162
+ Args:
163
+ image (Image):
164
+ The image to convert.
165
+ """
166
+ if not isinstance(image, PIL.Image.Image):
167
+ return image
168
+
169
+ # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
170
+ # for transparent images. The call to `alpha_composite` handles this case
171
+ if image.mode == "RGB":
172
+ return image
173
+
174
+ image_rgba = image.convert("RGBA")
175
+ background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
176
+ alpha_composite = Image.alpha_composite(background, image_rgba)
177
+ alpha_composite = alpha_composite.convert("RGB")
178
+ return alpha_composite
179
+
180
+
181
+ class Idefics2ImageProcessor(BaseImageProcessor):
182
+ r"""
183
+ Constructs a Idefics image processor.
184
+
185
+ Args:
186
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
187
+ Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
188
+ Only has an effect if the input image is in the PIL format.
189
+ do_resize (`bool`, *optional*, defaults to `True`):
190
+ Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the
191
+ shortest edge resized to keep the input aspect ratio, with a minimum size of `size["shortest_edge"]`.
192
+ size (`Dict`, *optional*):
193
+ Controls the size of the output image. This is a dictionary containing the keys "shortest_edge" and "longest_edge".
194
+ resample (`Resampling`, *optional*, defaults to `Resampling.BILINEAR`):
195
+ Resampling filter to use when resizing the image.
196
+ do_rescale (`bool`, *optional*, defaults to `True`):
197
+ Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1.
198
+ rescale_factor (`float`, *optional*, defaults to `1/255`):
199
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
200
+ do_normalize (`bool`, *optional*, defaults to `True`):
201
+ Whether to normalize the image. If set to `True`, the image is normalized to have a mean of `image_mean` and
202
+ a standard deviation of `image_std`.
203
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
204
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
205
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
206
+ overridden by the `image_mean` parameter in the `preprocess` method.
207
+ image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
208
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
209
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
210
+ Can be overridden by the `image_std` parameter in the `preprocess` method.
211
+ do_pad (`bool`, *optional*, defaults to `True`):
212
+ Whether or not to pad the images to the largest height and width in the batch and number of images per
213
+ sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width).
214
+ do_image_splitting (`bool`, *optional*, defaults to `False`):
215
+ Whether to split the image into a sequence 4 equal sub-images concatenated with the original image. That
216
+ strategy was first introduced in https://arxiv.org/abs/2311.06607.
217
+ """
218
+
219
+ model_input_names = ["pixel_values"]
220
+
221
+ def __init__(
222
+ self,
223
+ do_convert_rgb: bool = True,
224
+ do_resize: bool = True,
225
+ size: Dict[str, int] = None,
226
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
227
+ do_rescale: bool = True,
228
+ rescale_factor: float = 1 / 255,
229
+ do_normalize: bool = True,
230
+ image_mean: Optional[Union[float, List[float]]] = None,
231
+ image_std: Optional[Union[float, List[float]]] = None,
232
+ do_pad: bool = True,
233
+ do_image_splitting: bool = False,
234
+ **kwargs,
235
+ ) -> None:
236
+ super().__init__(**kwargs)
237
+ self.do_convert_rgb = do_convert_rgb
238
+ self.do_resize = do_resize
239
+ self.size = size if size is not None else {"shortest_edge": 378, "longest_edge": 980}
240
+ self.resample = resample
241
+ self.do_rescale = do_rescale
242
+ self.rescale_factor = rescale_factor
243
+ self.do_normalize = do_normalize
244
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
245
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
246
+ self.do_pad = do_pad
247
+ self.do_image_splitting = do_image_splitting
248
+
249
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image. Either `{"shortest_edge": int, "longest_edge": int}` (aspect-ratio
                preserving) or `{"height": int, "width": int}` (exact size).
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.

        Raises:
            ValueError: If `size` does not contain either pair of expected keys.
        """
        if "shortest_edge" in size and "longest_edge" in size:
            # Aspect-ratio-preserving resize bounded by both edge constraints.
            size = get_resize_output_image_size(image, size, input_data_format)
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(
                "size must be a dictionary with keys 'shortest_edge' and 'longest_edge' or 'height' and 'width'."
            )
        return resize(
            image, size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
        )
285
+
286
+ # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image
287
+ def _pad_image(
288
+ self,
289
+ image: np.ndarray,
290
+ output_size: Tuple[int, int],
291
+ constant_values: Union[float, Iterable[float]] = 0,
292
+ data_format: Optional[ChannelDimension] = None,
293
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
294
+ ) -> np.ndarray:
295
+ """
296
+ Pad an image with zeros to the given size.
297
+ """
298
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
299
+ output_height, output_width = output_size
300
+
301
+ pad_bottom = output_height - input_height
302
+ pad_right = output_width - input_width
303
+ padding = ((0, pad_bottom), (0, pad_right))
304
+ padded_image = pad(
305
+ image,
306
+ padding,
307
+ mode=PaddingMode.CONSTANT,
308
+ constant_values=constant_values,
309
+ data_format=data_format,
310
+ input_data_format=input_data_format,
311
+ )
312
+ return padded_image
313
+
314
    def pad(
        self,
        images: List[np.ndarray],
        constant_values: Union[float, Iterable[float]] = 0,
        return_pixel_mask: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> Tuple[List[List[np.ndarray]], Optional[List[List[np.ndarray]]]]:
        """
        For a list of images, for each images, pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width.
        For each sample in the batch, pads the sample with empty images to the max_number of images per sample in the batch. Optionally returns a pixel mask.

        Args:
            images (`np.ndarray`):
                List of list of images to pad. Pads to the largest height and width in the batch.
            constant_values (`float` or `Iterable[float]`, *optional*):
                The value to use for the padding if `mode` is `"constant"`.
            return_pixel_mask (`bool`, *optional*, defaults to `True`):
                Whether to return a pixel mask.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.

        Returns:
            A pair `(padded_images_list, padded_masks)`; `padded_masks` is `None` when
            `return_pixel_mask=False`.
        """
        # NOTE(review): `return_tensors` is accepted but unused here; tensor conversion
        # happens in `preprocess` when building the final `BatchFeature`.
        pad_size = get_max_height_width(images, input_data_format=input_data_format)

        batch_size = len(images)
        max_num_images = max(len(images_) for images_ in images)
        input_data_format = (
            infer_channel_dimension_format(images[0][0]) if input_data_format is None else input_data_format
        )
        data_format = input_data_format if data_format is None else data_format

        def empty_image(size, input_data_format):
            # All-zero uint8 placeholder with 3 channels laid out per the requested format.
            if input_data_format == ChannelDimension.FIRST:
                return np.zeros((3, *size), dtype=np.uint8)
            elif input_data_format == ChannelDimension.LAST:
                return np.zeros((*size, 3), dtype=np.uint8)
            raise ValueError("Invalid channel dimension format.")

        # NOTE(review): `data_format` (the *output* layout) is deliberately passed as
        # `empty_image`'s `input_data_format` argument — placeholders are created
        # directly in the output layout and never converted.
        padded_images_list = [
            [empty_image(pad_size, data_format) for _ in range(max_num_images)] for _ in range(batch_size)
        ]
        padded_masks = [[np.zeros(pad_size) for _ in range(max_num_images)] for _ in range(batch_size)]

        for batch_idx in range(batch_size):
            for sample_idx, image in enumerate(images[batch_idx]):
                padded_images_list[batch_idx][sample_idx] = self._pad_image(
                    image,
                    pad_size,
                    constant_values=constant_values,
                    data_format=data_format,
                    input_data_format=input_data_format,
                )
                padded_masks[batch_idx][sample_idx] = make_pixel_mask(
                    image, output_size=pad_size, input_data_format=input_data_format
                )

        padded_masks = padded_masks if return_pixel_mask else None
        return padded_images_list, padded_masks
382
+
383
+ def _crop(
384
+ self,
385
+ im: np.ndarray,
386
+ w1: int,
387
+ h1: int,
388
+ w2: int,
389
+ h2: int,
390
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
391
+ ) -> np.ndarray:
392
+ if input_data_format == ChannelDimension.FIRST:
393
+ return im[:, h1:h2, w1:w2]
394
+ elif input_data_format == ChannelDimension.LAST:
395
+ return im[h1:h2, w1:w2, :]
396
+
397
+ def split_image(
398
+ self,
399
+ image: np.ndarray,
400
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
401
+ ):
402
+ """
403
+ Split an image into 4 equal sub-images, and the concatenate that sequence with the original image.
404
+ That means that a single image becomes a sequence of 5 images.
405
+ This is a "trick" to spend more compute on each image with no changes in the vision encoder.
406
+
407
+ Args:
408
+ image (`np.ndarray`):
409
+ Images to split.
410
+ input_data_format (`ChannelDimension` or `str`, *optional*):
411
+ The channel dimension format of the input image. If not provided, it will be inferred.
412
+ """
413
+ height, width = get_image_size(image, input_data_format)
414
+
415
+ mid_width = width // 2
416
+ mid_height = height // 2
417
+ return [
418
+ self._crop(image, 0, 0, mid_width, mid_height, input_data_format),
419
+ self._crop(image, mid_width, 0, width, mid_height, input_data_format),
420
+ self._crop(image, 0, mid_height, mid_width, height, input_data_format),
421
+ self._crop(image, mid_width, mid_height, width, height, input_data_format),
422
+ image,
423
+ ]
424
+
425
    def preprocess(
        self,
        images: ImageInput,
        do_convert_rgb: Optional[bool] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: Optional[bool] = None,
        do_image_splitting: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        input_data_format: Optional[ChannelDimension] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ):
        """
        Preprocess a batch of images.

        Args:
            images (`ImageInput`):
                A list of images to preprocess.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether or not to pad the images to the largest height and width in the batch.
            do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`):
                Whether to split the image into a sequence of 4 equal sub-images concatenated with the original image.
                That strategy was first introduced in https://arxiv.org/abs/2311.06607.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        # Resolve every option against the instance defaults set in `__init__`.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        do_pad = do_pad if do_pad is not None else self.do_pad
        do_image_splitting = do_image_splitting if do_image_splitting is not None else self.do_image_splitting

        images_list = make_list_of_images(images)

        # NOTE(review): only the first sample is type-checked — presumably all samples
        # share the same image type; verify against callers.
        if not valid_images(images_list[0]):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        if do_convert_rgb:
            images_list = [[convert_to_rgb(image) for image in images] for images in images_list]

        # All transformations expect numpy arrays.
        images_list = [[to_numpy_array(image) for image in images] for images in images_list]

        if is_scaled_image(images_list[0][0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images_list[0][0])

        # Each image becomes its 4 quadrant crops followed by the original (see `split_image`).
        if do_image_splitting:
            new_images_list = []
            for images in images_list:
                new_images = []
                for image in images:
                    new_images.extend(self.split_image(image, input_data_format))
                new_images_list.append(new_images)
            images_list = new_images_list

        if do_resize:
            images_list = [
                [
                    self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
                    for image in images
                ]
                for images in images_list
            ]

        if do_rescale:
            images_list = [
                [
                    self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                    for image in images
                ]
                for images in images_list
            ]

        if do_normalize:
            images_list = [
                [
                    self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                    for image in images
                ]
                for images in images_list
            ]

        pixel_attention_mask = None
        if do_pad:
            # `pad` returns (padded images, pixel masks); masks mark real vs padded pixels.
            images_list, pixel_attention_mask = self.pad(
                images_list, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=input_data_format
            )

        if data_format is not None:
            images_list = [
                [
                    to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
                    for image in images
                ]
                for images in images_list
            ]

        data = {"pixel_values": np.array(images_list) if do_pad else images_list}  # Faster tensor conversion
        if pixel_attention_mask is not None:
            data["pixel_attention_mask"] = np.array(pixel_attention_mask) if do_pad else pixel_attention_mask

        return BatchFeature(data=data, tensor_type=return_tensors)
parrot/lib/python3.10/site-packages/transformers/models/idefics2/modeling_idefics2.py ADDED
@@ -0,0 +1,1962 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Idefics2 model."""
16
+
17
+ import inspect
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss
27
+
28
+ from ... import PreTrainedModel
29
+ from ...activations import ACT2FN
30
+ from ...cache_utils import Cache, DynamicCache
31
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
32
+ from ...modeling_outputs import BaseModelOutput, ModelOutput
33
+ from ...utils import (
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ is_flash_attn_2_available,
37
+ is_flash_attn_greater_or_equal_2_10,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from ..auto import AutoModel
42
+ from .configuration_idefics2 import Idefics2Config, Idefics2VisionConfig
43
+
44
+
45
+ if is_flash_attn_2_available():
46
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
47
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
48
+
49
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
50
+
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+ _CONFIG_FOR_DOC = "Idefics2Config"
55
+
56
+
57
@dataclass
class Idefics2BaseModelOutputWithPast(ModelOutput):
    """
    Base class for Idefics2 model's outputs that may also contain a past key/values (to speed up sequential decoding).
    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
            If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
            sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
    """

    # Annotated Optional to match the `None` default used before the model populates it.
    last_hidden_state: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
94
+
95
+
96
@dataclass
# Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->Idefics2
class Idefics2CausalLMOutputWithPast(ModelOutput):
    """
    Base class for Idefics2 causal language model (or autoregressive) outputs.
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
            sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
    """

    loss: Optional[torch.FloatTensor] = None
    # Annotated Optional to match the `None` default used before the model populates it.
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
132
+
133
+
134
class Idefics2VisionEmbeddings(nn.Module):
    """
    This is a modified version of `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of variable
    resolution.

    The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://arxiv.org/abs/2307.06304)
    which allows treating images in their native aspect ratio and without the need to resize them to the same
    fixed size. In particular, we start from the original pre-trained SigLIP model
    (which uses images of fixed-size square images) and adapt it by training on images of variable resolutions.
    """

    def __init__(self, config: Idefics2VisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        # Non-overlapping patchification: stride == kernel_size == patch_size.
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        # Position table sized for the pretrained square grid; variable-resolution
        # inputs are mapped onto it in `forward` via bucketized fractional coordinates.
        self.num_patches_per_side = self.image_size // self.patch_size
        self.num_patches = self.num_patches_per_side**2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)

    def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
        """
        Embed `pixel_values` into patch embeddings with resolution-aware position embeddings.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, max_im_h, max_im_w)`):
                Batch of (possibly padded) images.
            patch_attention_mask (`torch.BoolTensor`):
                Per-sample mask over the patch grid; True marks patches belonging to the real
                (unpadded) image region.

        Returns:
            `torch.Tensor` of shape `(batch_size, num_patches, embed_dim)`.
        """
        batch_size, _, max_im_h, max_im_w = pixel_values.shape

        # (batch, embed_dim, h/p, w/p) -> (batch, num_patches, embed_dim)
        patch_embeds = self.patch_embedding(pixel_values)
        embeddings = patch_embeds.flatten(2).transpose(1, 2)

        max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
        # Bucket edges over (0, 1) used to map each image's fractional patch coordinates
        # onto the pretrained `num_patches_per_side` grid.
        boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
        # Built on CPU (no device passed); moved to the embedding's device below.
        position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0)

        for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
            # Count valid rows via the first column and valid columns via the first row
            # — assumes the valid region is a top-left-aligned rectangle; TODO confirm.
            nb_patches_h = p_attn_mask[:, 0].sum()
            nb_patches_w = p_attn_mask[0].sum()

            # Fractional (0, 1) coordinates of this image's patches; 1e-6 keeps the
            # endpoint out of the range.
            fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
            fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)

            bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
            bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)

            # Row-major position index into the square pretrained grid.
            pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
            # Scatter into the valid (unmasked) slots only.
            position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids

        position_ids = position_ids.to(self.position_embedding.weight.device)
        embeddings = embeddings + self.position_embedding(position_ids)
        return embeddings
190
+
191
+
192
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipAttention with Siglip->Idefics2Vision
193
+ class Idefics2VisionAttention(nn.Module):
194
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
195
+
196
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
197
+ def __init__(self, config):
198
+ super().__init__()
199
+ self.config = config
200
+ self.embed_dim = config.hidden_size
201
+ self.num_heads = config.num_attention_heads
202
+ self.head_dim = self.embed_dim // self.num_heads
203
+ if self.head_dim * self.num_heads != self.embed_dim:
204
+ raise ValueError(
205
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
206
+ f" {self.num_heads})."
207
+ )
208
+ self.scale = self.head_dim**-0.5
209
+ self.dropout = config.attention_dropout
210
+
211
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
212
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
213
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
214
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
215
+
216
+ # Ignore copy
217
+ self.is_causal = False
218
+
219
+ def forward(
220
+ self,
221
+ hidden_states: torch.Tensor,
222
+ attention_mask: Optional[torch.Tensor] = None,
223
+ output_attentions: Optional[bool] = False,
224
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
225
+ """Input shape: Batch x Time x Channel"""
226
+
227
+ batch_size, q_len, _ = hidden_states.size()
228
+
229
+ query_states = self.q_proj(hidden_states)
230
+ key_states = self.k_proj(hidden_states)
231
+ value_states = self.v_proj(hidden_states)
232
+
233
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
234
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
235
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
236
+
237
+ k_v_seq_len = key_states.shape[-2]
238
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
239
+
240
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
241
+ raise ValueError(
242
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
243
+ f" {attn_weights.size()}"
244
+ )
245
+
246
+ if attention_mask is not None:
247
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
248
+ raise ValueError(
249
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
250
+ )
251
+ attn_weights = attn_weights + attention_mask
252
+
253
+ # upcast attention to fp32
254
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
255
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
256
+ attn_output = torch.matmul(attn_weights, value_states)
257
+
258
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
259
+ raise ValueError(
260
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
261
+ f" {attn_output.size()}"
262
+ )
263
+
264
+ attn_output = attn_output.transpose(1, 2).contiguous()
265
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
266
+
267
+ attn_output = self.out_proj(attn_output)
268
+
269
+ return attn_output, attn_weights
270
+
271
+
272
+ class Idefics2VisionFlashAttention2(Idefics2VisionAttention):
273
+ """
274
+ Idefics2Vision flash attention module. This module inherits from `Idefics2VisionAttention` as the weights of the module stays
275
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
276
+ flash attention and deal with padding tokens in case the input contains any of them.
277
+ """
278
+
279
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
280
+ def __init__(self, *args, **kwargs):
281
+ super().__init__(*args, **kwargs)
282
+
283
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
284
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
285
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
286
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
287
+
288
+ def forward(
289
+ self,
290
+ hidden_states: torch.Tensor,
291
+ attention_mask: Optional[torch.LongTensor] = None,
292
+ position_ids: Optional[torch.LongTensor] = None,
293
+ past_key_value: Optional[Cache] = None,
294
+ output_attentions: bool = False,
295
+ use_cache: bool = False,
296
+ **kwargs,
297
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
298
+ output_attentions = False
299
+
300
+ bsz, q_len, _ = hidden_states.size()
301
+
302
+ query_states = self.q_proj(hidden_states)
303
+ key_states = self.k_proj(hidden_states)
304
+ value_states = self.v_proj(hidden_states)
305
+
306
+ # Flash attention requires the input to have the shape
307
+ # batch_size x seq_length x head_dim x hidden_dim
308
+ # therefore we just need to keep the original shape
309
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
310
+ key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
311
+ value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
312
+
313
+ kv_seq_len = key_states.shape[-2]
314
+ if past_key_value is not None:
315
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
316
+
317
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
318
+ # to be able to avoid many of these transpose/reshape/view.
319
+ query_states = query_states.transpose(1, 2)
320
+ key_states = key_states.transpose(1, 2)
321
+ value_states = value_states.transpose(1, 2)
322
+
323
+ dropout_rate = self.dropout if self.training else 0.0
324
+
325
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
326
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
327
+ # cast them back in the correct dtype just to be sure everything works as expected.
328
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
329
+ # in fp32. (Idefics2VisionRMSNorm handles it correctly)
330
+
331
+ input_dtype = query_states.dtype
332
+ if input_dtype == torch.float32:
333
+ if torch.is_autocast_enabled():
334
+ target_dtype = torch.get_autocast_gpu_dtype()
335
+ # Handle the case where the model is quantized
336
+ elif hasattr(self.config, "_pre_quantization_dtype"):
337
+ target_dtype = self.config._pre_quantization_dtype
338
+ else:
339
+ target_dtype = self.q_proj.weight.dtype
340
+
341
+ logger.warning_once(
342
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
343
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
344
+ f" {target_dtype}."
345
+ )
346
+
347
+ query_states = query_states.to(target_dtype)
348
+ key_states = key_states.to(target_dtype)
349
+ value_states = value_states.to(target_dtype)
350
+
351
+ attn_output = self._flash_attention_forward(
352
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
353
+ )
354
+
355
+ attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
356
+ attn_output = self.out_proj(attn_output)
357
+
358
+ if not output_attentions:
359
+ attn_weights = None
360
+
361
+ return attn_output, attn_weights
362
+
363
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
364
+ def _flash_attention_forward(
365
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
366
+ ):
367
+ """
368
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
369
+ first unpad the input, then computes the attention scores and pad the final attention scores.
370
+
371
+ Args:
372
+ query_states (`torch.Tensor`):
373
+ Input query states to be passed to Flash Attention API
374
+ key_states (`torch.Tensor`):
375
+ Input key states to be passed to Flash Attention API
376
+ value_states (`torch.Tensor`):
377
+ Input value states to be passed to Flash Attention API
378
+ attention_mask (`torch.Tensor`):
379
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
380
+ position of padding tokens and 1 for the position of non-padding tokens.
381
+ dropout (`float`):
382
+ Attention dropout
383
+ softmax_scale (`float`, *optional*):
384
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
385
+ """
386
+ if not self._flash_attn_uses_top_left_mask:
387
+ causal = self.is_causal
388
+ else:
389
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
390
+ causal = self.is_causal and query_length != 1
391
+
392
+ # Contains at least one padding token in the sequence
393
+ if attention_mask is not None:
394
+ batch_size = query_states.shape[0]
395
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
396
+ query_states, key_states, value_states, attention_mask, query_length
397
+ )
398
+
399
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
400
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
401
+
402
+ attn_output_unpad = flash_attn_varlen_func(
403
+ query_states,
404
+ key_states,
405
+ value_states,
406
+ cu_seqlens_q=cu_seqlens_q,
407
+ cu_seqlens_k=cu_seqlens_k,
408
+ max_seqlen_q=max_seqlen_in_batch_q,
409
+ max_seqlen_k=max_seqlen_in_batch_k,
410
+ dropout_p=dropout,
411
+ softmax_scale=softmax_scale,
412
+ causal=causal,
413
+ )
414
+
415
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
416
+ else:
417
+ attn_output = flash_attn_func(
418
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
419
+ )
420
+
421
+ return attn_output
422
+
423
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
424
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
425
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
426
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
427
+
428
+ key_layer = index_first_axis(
429
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
430
+ )
431
+ value_layer = index_first_axis(
432
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
433
+ )
434
+ if query_length == kv_seq_len:
435
+ query_layer = index_first_axis(
436
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
437
+ )
438
+ cu_seqlens_q = cu_seqlens_k
439
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
440
+ indices_q = indices_k
441
+ elif query_length == 1:
442
+ max_seqlen_in_batch_q = 1
443
+ cu_seqlens_q = torch.arange(
444
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
445
+ ) # There is a memcpy here, that is very bad.
446
+ indices_q = cu_seqlens_q[:-1]
447
+ query_layer = query_layer.squeeze(1)
448
+ else:
449
+ # The -q_len: slice assumes left padding.
450
+ attention_mask = attention_mask[:, -query_length:]
451
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
452
+
453
+ return (
454
+ query_layer,
455
+ key_layer,
456
+ value_layer,
457
+ indices_q,
458
+ (cu_seqlens_q, cu_seqlens_k),
459
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
460
+ )
461
+
462
+
463
+ IDEFICS_VISION_ATTENTION_CLASSES = {
464
+ "eager": Idefics2VisionAttention,
465
+ "flash_attention_2": Idefics2VisionFlashAttention2,
466
+ }
467
+
468
+
469
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipMLP with Siglip->Idefics2Vision
470
+ class Idefics2VisionMLP(nn.Module):
471
+ def __init__(self, config):
472
+ super().__init__()
473
+ self.config = config
474
+ self.activation_fn = ACT2FN[config.hidden_act]
475
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
476
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
477
+
478
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
479
+ hidden_states = self.fc1(hidden_states)
480
+ hidden_states = self.activation_fn(hidden_states)
481
+ hidden_states = self.fc2(hidden_states)
482
+ return hidden_states
483
+
484
+
485
+ class Idefics2MLP(nn.Module):
486
+ def __init__(
487
+ self,
488
+ hidden_size: int,
489
+ intermediate_size: int,
490
+ output_size: int,
491
+ hidden_act: str,
492
+ ):
493
+ super().__init__()
494
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
495
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
496
+ self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
497
+ self.act_fn = ACT2FN[hidden_act]
498
+
499
+ def forward(self, x):
500
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
501
+
502
+
503
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipMultiheadAttentionPoolingHead with Siglip->Idefics2
504
+ class Idefics2MultiheadAttentionPoolingHead(nn.Module):
505
+ """Multihead Attention Pooling."""
506
+
507
+ def __init__(self, config: Idefics2VisionConfig):
508
+ super().__init__()
509
+
510
+ self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
511
+ self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
512
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
513
+ # Ignore copy
514
+ self.mlp = Idefics2MLP(
515
+ hidden_size=config.hidden_size,
516
+ intermediate_size=config.intermediate_size,
517
+ hidden_act=config.hidden_act,
518
+ output_size=config.hidden_size,
519
+ )
520
+
521
+ def forward(self, hidden_state):
522
+ batch_size = hidden_state.shape[0]
523
+ probe = self.probe.repeat(batch_size, 1, 1)
524
+
525
+ hidden_state = self.attention(probe, hidden_state, hidden_state)[0]
526
+
527
+ residual = hidden_state
528
+ hidden_state = self.layernorm(hidden_state)
529
+ hidden_state = residual + self.mlp(hidden_state)
530
+
531
+ return hidden_state[:, 0]
532
+
533
+
534
+ class Idefics2EncoderLayer(nn.Module):
535
+ def __init__(self, config: Idefics2Config):
536
+ super().__init__()
537
+ self.embed_dim = config.hidden_size
538
+ self.self_attn = IDEFICS_VISION_ATTENTION_CLASSES[config._attn_implementation](config)
539
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
540
+ self.mlp = Idefics2VisionMLP(config)
541
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
542
+
543
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoderLayer.forward
544
+ def forward(
545
+ self,
546
+ hidden_states: torch.Tensor,
547
+ attention_mask: torch.Tensor,
548
+ output_attentions: Optional[bool] = False,
549
+ ) -> Tuple[torch.FloatTensor]:
550
+ """
551
+ Args:
552
+ hidden_states (`torch.FloatTensor`):
553
+ Input to the layer of shape `(batch, seq_len, embed_dim)`.
554
+ attention_mask (`torch.FloatTensor`):
555
+ Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
556
+ output_attentions (`bool`, *optional*, defaults to `False`):
557
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
558
+ returned tensors for more detail.
559
+ """
560
+ residual = hidden_states
561
+
562
+ hidden_states = self.layer_norm1(hidden_states)
563
+ hidden_states, attn_weights = self.self_attn(
564
+ hidden_states=hidden_states,
565
+ attention_mask=attention_mask,
566
+ output_attentions=output_attentions,
567
+ )
568
+ hidden_states = residual + hidden_states
569
+
570
+ residual = hidden_states
571
+ hidden_states = self.layer_norm2(hidden_states)
572
+ hidden_states = self.mlp(hidden_states)
573
+ hidden_states = residual + hidden_states
574
+
575
+ outputs = (hidden_states,)
576
+
577
+ if output_attentions:
578
+ outputs += (attn_weights,)
579
+
580
+ return outputs
581
+
582
+
583
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoder with Siglip->Idefics2
584
+ class Idefics2Encoder(nn.Module):
585
+ """
586
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
587
+ [`Idefics2EncoderLayer`].
588
+
589
+ Args:
590
+ config: Idefics2Config
591
+ """
592
+
593
+ def __init__(self, config: Idefics2Config):
594
+ super().__init__()
595
+ self.config = config
596
+ self.layers = nn.ModuleList([Idefics2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
597
+ self.gradient_checkpointing = False
598
+
599
+ # Ignore copy
600
+ def forward(
601
+ self,
602
+ inputs_embeds,
603
+ attention_mask: Optional[torch.Tensor] = None,
604
+ output_attentions: Optional[bool] = None,
605
+ output_hidden_states: Optional[bool] = None,
606
+ return_dict: Optional[bool] = None,
607
+ ) -> Union[Tuple, BaseModelOutput]:
608
+ r"""
609
+ Args:
610
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
611
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
612
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
613
+ than the model's internal embedding lookup matrix.
614
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
615
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
616
+
617
+ - 1 for tokens that are **not masked**,
618
+ - 0 for tokens that are **masked**.
619
+
620
+ [What are attention masks?](../glossary#attention-mask)
621
+ output_attentions (`bool`, *optional*):
622
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
623
+ returned tensors for more detail.
624
+ output_hidden_states (`bool`, *optional*):
625
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
626
+ for more detail.
627
+ return_dict (`bool`, *optional*):
628
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
629
+ """
630
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
631
+ output_hidden_states = (
632
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
633
+ )
634
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
635
+
636
+ encoder_states = () if output_hidden_states else None
637
+ all_attentions = () if output_attentions else None
638
+
639
+ hidden_states = inputs_embeds
640
+ for encoder_layer in self.layers:
641
+ if output_hidden_states:
642
+ encoder_states = encoder_states + (hidden_states,)
643
+ if self.gradient_checkpointing and self.training:
644
+ layer_outputs = self._gradient_checkpointing_func(
645
+ encoder_layer.__call__,
646
+ hidden_states,
647
+ attention_mask,
648
+ output_attentions,
649
+ )
650
+ else:
651
+ layer_outputs = encoder_layer(
652
+ hidden_states,
653
+ attention_mask,
654
+ output_attentions=output_attentions,
655
+ )
656
+
657
+ hidden_states = layer_outputs[0]
658
+
659
+ if output_attentions:
660
+ all_attentions = all_attentions + (layer_outputs[1],)
661
+
662
+ if output_hidden_states:
663
+ encoder_states = encoder_states + (hidden_states,)
664
+
665
+ if not return_dict:
666
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
667
+ return BaseModelOutput(
668
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
669
+ )
670
+
671
+
672
+ class Idefics2VisionTransformer(nn.Module):
673
+ def __init__(self, config: Idefics2VisionConfig):
674
+ super().__init__()
675
+ embed_dim = config.hidden_size
676
+
677
+ self.config = config
678
+ self.embeddings = Idefics2VisionEmbeddings(config)
679
+ self.encoder = Idefics2Encoder(config)
680
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
681
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
682
+
683
+ def get_input_embeddings(self):
684
+ return self.embeddings
685
+
686
+ def set_input_embeddings(self, value):
687
+ self.embeddings = value
688
+
689
+ def forward(
690
+ self,
691
+ pixel_values,
692
+ patch_attention_mask: Optional[torch.BoolTensor] = None,
693
+ output_attentions: Optional[bool] = None,
694
+ output_hidden_states: Optional[bool] = None,
695
+ return_dict: Optional[bool] = None,
696
+ ) -> Union[Tuple, BaseModelOutput]:
697
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
698
+ output_hidden_states = (
699
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
700
+ )
701
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
702
+
703
+ batch_size = pixel_values.size(0)
704
+ if patch_attention_mask is None:
705
+ patch_size = self.config.patch_size
706
+ patch_attention_mask = torch.ones(
707
+ (
708
+ batch_size,
709
+ pixel_values.size(2) // patch_size,
710
+ pixel_values.size(3) // patch_size,
711
+ )
712
+ )
713
+ patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device)
714
+
715
+ hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
716
+
717
+ patch_attention_mask = patch_attention_mask.view(batch_size, -1)
718
+ # The call to `_upad_input` in `_flash_attention_forward` is expensive
719
+ # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
720
+ # avoiding passing the attention_mask, which is equivalent to attending to the full sequence
721
+ if not torch.any(~patch_attention_mask):
722
+ patch_attention_mask = None
723
+ elif not self._use_flash_attention_2:
724
+ patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
725
+
726
+ encoder_outputs = self.encoder(
727
+ inputs_embeds=hidden_states,
728
+ attention_mask=patch_attention_mask,
729
+ output_attentions=output_attentions,
730
+ output_hidden_states=output_hidden_states,
731
+ return_dict=return_dict,
732
+ )
733
+
734
+ last_hidden_state = encoder_outputs[0]
735
+ last_hidden_state = self.post_layernorm(last_hidden_state)
736
+
737
+ if not return_dict:
738
+ return (last_hidden_state,) + encoder_outputs[1:]
739
+
740
+ return BaseModelOutput(
741
+ last_hidden_state=last_hidden_state,
742
+ hidden_states=encoder_outputs.hidden_states,
743
+ attentions=encoder_outputs.attentions,
744
+ )
745
+
746
+
747
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
748
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
749
+ """
750
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
751
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
752
+ """
753
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
754
+ if n_rep == 1:
755
+ return hidden_states
756
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
757
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
758
+
759
+
760
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
761
+ def _get_unpad_data(attention_mask):
762
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
763
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
764
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
765
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
766
+ return (
767
+ indices,
768
+ cu_seqlens,
769
+ max_seqlen_in_batch,
770
+ )
771
+
772
+
773
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Idefics2
774
+ class Idefics2RMSNorm(nn.Module):
775
+ def __init__(self, hidden_size, eps=1e-6):
776
+ """
777
+ Idefics2RMSNorm is equivalent to T5LayerNorm
778
+ """
779
+ super().__init__()
780
+ self.weight = nn.Parameter(torch.ones(hidden_size))
781
+ self.variance_epsilon = eps
782
+
783
+ def forward(self, hidden_states):
784
+ input_dtype = hidden_states.dtype
785
+ hidden_states = hidden_states.to(torch.float32)
786
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
787
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
788
+ return self.weight * hidden_states.to(input_dtype)
789
+
790
+
791
+ class Idefics2PerceiverAttention(nn.Module):
792
+ def __init__(self, config, layer_idx: Optional[int] = None) -> None:
793
+ """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
794
+ super().__init__()
795
+
796
+ self.layer_idx = None
797
+ self.hidden_size = config.text_config.hidden_size
798
+ self.num_heads = config.perceiver_config.resampler_n_heads
799
+ self.head_dim = config.perceiver_config.resampler_head_dim
800
+ self.num_key_value_heads = config.perceiver_config.num_key_value_heads
801
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
802
+ self.attention_dropout = config.perceiver_config.attention_dropout
803
+
804
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
805
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
806
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
807
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
808
+
809
+ self.is_causal = False
810
+
811
+ def forward(
812
+ self,
813
+ latents: torch.Tensor,
814
+ context: torch.Tensor,
815
+ attention_mask: Optional[torch.Tensor] = None,
816
+ position_ids: Optional[torch.LongTensor] = None,
817
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
818
+ output_attentions: bool = False,
819
+ use_cache: bool = False,
820
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
821
+ """
822
+ Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
823
+
824
+ Args:
825
+ latents (`torch.Tensor`): Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to compress to.
826
+ context (`torch.Tensor`): Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample.
827
+ attention_mask (`torch.Tensor`, *optional*): Tensor of shape [bsz, 1, seq, n_latents] representing attention mask.
828
+ position_ids (`torch.LongTensor`, *optional*): Tensor of shape [bsz, seq] representing position indices of each input token.
829
+ past_key_value (`Tuple[torch.Tensor]`, *optional*): Tuple of tensors containing cached key and value states.
830
+ output_attentions (`bool`, *optional*, defaults to `False`): Whether to return attention weights.
831
+ use_cache (`bool`, *optional*, defaults to `False`): Whether to use past_key_value for caching.
832
+ """
833
+ bsz, q_len, _ = latents.size()
834
+ kv_seq_len = q_len + context.size()[1]
835
+
836
+ hidden_states = torch.concat([context, latents], dim=-2)
837
+
838
+ query_states = self.q_proj(latents)
839
+ key_states = self.k_proj(hidden_states)
840
+ value_states = self.v_proj(hidden_states)
841
+
842
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
843
+ key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
844
+ value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
845
+
846
+ past_key_value = getattr(self, "past_key_value", past_key_value)
847
+
848
+ if past_key_value is not None:
849
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
850
+
851
+ # repeat k/v heads if n_kv_heads < n_heads
852
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
853
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
854
+
855
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
856
+
857
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
858
+ raise ValueError(
859
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
860
+ f" {attn_weights.size()}"
861
+ )
862
+
863
+ if attention_mask is not None:
864
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
865
+ raise ValueError(
866
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
867
+ )
868
+
869
+ attn_weights = attn_weights + attention_mask
870
+
871
+ # upcast attention to fp32
872
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
873
+ attn_output = torch.matmul(attn_weights, value_states)
874
+
875
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
876
+ raise ValueError(
877
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
878
+ f" {attn_output.size()}"
879
+ )
880
+
881
+ attn_output = attn_output.transpose(1, 2).contiguous()
882
+ attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim)
883
+
884
+ attn_output = self.o_proj(attn_output)
885
+
886
+ if not output_attentions:
887
+ attn_weights = None
888
+
889
+ return attn_output, attn_weights, past_key_value
890
+
891
+
892
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with MistralAttention->Idefics2PerceiverAttention,MistralFlashAttention->Idefics2PerceiverFlashAttention,Mistral->Idefics2
893
+ class Idefics2PerceiverFlashAttention2(Idefics2PerceiverAttention):
894
+ """
895
+ Idefics2 flash attention module. This module inherits from `Idefics2PerceiverAttention` as the weights of the module stays
896
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
897
+ flash attention and deal with padding tokens in case the input contains any of them.
898
+ """
899
+
900
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
901
+ def __init__(self, *args, **kwargs):
902
+ super().__init__(*args, **kwargs)
903
+
904
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
905
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
906
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
907
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
908
+
909
+ # Ignore copy
910
+ def forward(
911
+ self,
912
+ latents: torch.Tensor,
913
+ context: torch.Tensor,
914
+ attention_mask: Optional[torch.LongTensor] = None,
915
+ position_ids: Optional[torch.LongTensor] = None,
916
+ past_key_value: Optional[Cache] = None,
917
+ output_attentions: bool = False,
918
+ use_cache: bool = False,
919
+ **kwargs,
920
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
921
+ bsz, q_len, _ = latents.size()
922
+ kv_seq_len = q_len + context.size()[1]
923
+
924
+ # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
925
+ # Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
926
+ query_states = self.q_proj(latents)
927
+ key_states = self.k_proj(torch.cat([context, latents], dim=-2))
928
+ value_states = self.v_proj(torch.cat([context, latents], dim=-2))
929
+
930
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
931
+ key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
932
+ value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
933
+
934
+ kv_seq_len = key_states.shape[-2]
935
+ if past_key_value is not None:
936
+ kv_seq_len += past_key_value[0].shape[-2]
937
+
938
+ if past_key_value is not None:
939
+ # Activate slicing cache only if the config has a value `sliding_windows` attribute
940
+ if hasattr(self.config, "sliding_window") and kv_seq_len > self.config.sliding_window:
941
+ slicing_tokens = kv_seq_len - self.config.sliding_window
942
+
943
+ past_key = past_key_value[0]
944
+ past_value = past_key_value[1]
945
+
946
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
947
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
948
+
949
+ if past_key.shape[-2] != self.config.sliding_window - 1:
950
+ raise ValueError(
951
+ "past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1,"
952
+ f" head_dim`), got {past_key.shape}"
953
+ )
954
+
955
+ past_key_value = (past_key, past_value)
956
+
957
+ if attention_mask is not None:
958
+ attention_mask = attention_mask[:, slicing_tokens:]
959
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
960
+
961
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
962
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
963
+
964
+ past_key_value = (key_states, value_states) if use_cache else None
965
+
966
+ # repeat k/v heads if n_kv_heads < n_heads
967
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
968
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
969
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
970
+
971
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
972
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
973
+ # cast them back in float16 just to be sure everything works as expected.
974
+ input_dtype = query_states.dtype
975
+ if input_dtype == torch.float32:
976
+ if torch.is_autocast_enabled():
977
+ target_dtype = torch.get_autocast_gpu_dtype()
978
+ # Handle the case where the model is quantized
979
+ elif hasattr(self.config, "_pre_quantization_dtype"):
980
+ target_dtype = self.config._pre_quantization_dtype
981
+ else:
982
+ target_dtype = self.q_proj.weight.dtype
983
+
984
+ logger.warning_once(
985
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
986
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
987
+ f" {target_dtype}."
988
+ )
989
+
990
+ query_states = query_states.to(target_dtype)
991
+ key_states = key_states.to(target_dtype)
992
+ value_states = value_states.to(target_dtype)
993
+
994
+ # Reashape to the expected shape for Flash Attention
995
+ query_states = query_states.transpose(1, 2)
996
+ key_states = key_states.transpose(1, 2)
997
+ value_states = value_states.transpose(1, 2)
998
+
999
+ attn_output = self._flash_attention_forward(
1000
+ query_states,
1001
+ key_states,
1002
+ value_states,
1003
+ attention_mask,
1004
+ q_len,
1005
+ dropout=dropout_rate,
1006
+ use_sliding_windows=False,
1007
+ )
1008
+
1009
+ attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous()
1010
+ attn_output = self.o_proj(attn_output)
1011
+
1012
+ if not output_attentions:
1013
+ attn_weights = None
1014
+
1015
+ return attn_output, attn_weights, past_key_value
1016
+
1017
+ def _flash_attention_forward(
1018
+ self,
1019
+ query_states,
1020
+ key_states,
1021
+ value_states,
1022
+ attention_mask,
1023
+ query_length,
1024
+ dropout=0.0,
1025
+ softmax_scale=None,
1026
+ use_sliding_windows=False,
1027
+ ):
1028
+ """
1029
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
1030
+ first unpad the input, then computes the attention scores and pad the final attention scores.
1031
+
1032
+ Args:
1033
+ query_states (`torch.Tensor`):
1034
+ Input query states to be passed to Flash Attention API
1035
+ key_states (`torch.Tensor`):
1036
+ Input key states to be passed to Flash Attention API
1037
+ value_states (`torch.Tensor`):
1038
+ Input value states to be passed to Flash Attention API
1039
+ attention_mask (`torch.Tensor`):
1040
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
1041
+ position of padding tokens and 1 for the position of non-padding tokens.
1042
+ dropout (`float`):
1043
+ Attention dropout
1044
+ softmax_scale (`float`, *optional*):
1045
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
1046
+ use_sliding_windows (`bool`, *optional*):
1047
+ Whether to activate sliding window attention.
1048
+ """
1049
+ if not self._flash_attn_uses_top_left_mask:
1050
+ causal = self.is_causal
1051
+ else:
1052
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
1053
+ causal = self.is_causal and query_length != 1
1054
+
1055
+ # Contains at least one padding token in the sequence
1056
+ if attention_mask is not None:
1057
+ batch_size = query_states.shape[0]
1058
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
1059
+ query_states, key_states, value_states, attention_mask, query_length
1060
+ )
1061
+
1062
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
1063
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
1064
+
1065
+ if not use_sliding_windows:
1066
+ attn_output_unpad = flash_attn_varlen_func(
1067
+ query_states,
1068
+ key_states,
1069
+ value_states,
1070
+ cu_seqlens_q=cu_seqlens_q,
1071
+ cu_seqlens_k=cu_seqlens_k,
1072
+ max_seqlen_q=max_seqlen_in_batch_q,
1073
+ max_seqlen_k=max_seqlen_in_batch_k,
1074
+ dropout_p=dropout,
1075
+ softmax_scale=softmax_scale,
1076
+ causal=causal,
1077
+ )
1078
+ else:
1079
+ attn_output_unpad = flash_attn_varlen_func(
1080
+ query_states,
1081
+ key_states,
1082
+ value_states,
1083
+ cu_seqlens_q=cu_seqlens_q,
1084
+ cu_seqlens_k=cu_seqlens_k,
1085
+ max_seqlen_q=max_seqlen_in_batch_q,
1086
+ max_seqlen_k=max_seqlen_in_batch_k,
1087
+ dropout_p=dropout,
1088
+ softmax_scale=softmax_scale,
1089
+ causal=causal,
1090
+ window_size=(self.config.sliding_window, self.config.sliding_window),
1091
+ )
1092
+
1093
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
1094
+ else:
1095
+ if not use_sliding_windows:
1096
+ attn_output = flash_attn_func(
1097
+ query_states,
1098
+ key_states,
1099
+ value_states,
1100
+ dropout,
1101
+ softmax_scale=softmax_scale,
1102
+ causal=causal,
1103
+ )
1104
+ else:
1105
+ attn_output = flash_attn_func(
1106
+ query_states,
1107
+ key_states,
1108
+ value_states,
1109
+ dropout,
1110
+ softmax_scale=softmax_scale,
1111
+ causal=causal,
1112
+ window_size=(self.config.sliding_window, self.config.sliding_window),
1113
+ )
1114
+
1115
+ return attn_output
1116
+
1117
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
1118
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
1119
+
1120
+ # On the first iteration we need to properly re-create the padding mask
1121
+ # by slicing it on the proper place
1122
+ if kv_seq_len != attention_mask.shape[-1]:
1123
+ attention_mask_num_tokens = attention_mask.shape[-1]
1124
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
1125
+
1126
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
1127
+
1128
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
1129
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
1130
+
1131
+ if query_length == kv_seq_len:
1132
+ query_layer = index_first_axis(
1133
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
1134
+ )
1135
+ cu_seqlens_q = cu_seqlens_k
1136
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
1137
+ indices_q = indices_k
1138
+ elif query_length == 1:
1139
+ max_seqlen_in_batch_q = 1
1140
+ cu_seqlens_q = torch.arange(
1141
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
1142
+ ) # There is a memcpy here, that is very bad.
1143
+ indices_q = cu_seqlens_q[:-1]
1144
+ query_layer = query_layer.squeeze(1)
1145
+ else:
1146
+ # The -q_len: slice assumes left padding.
1147
+ attention_mask = attention_mask[:, -query_length:]
1148
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
1149
+
1150
+ return (
1151
+ query_layer,
1152
+ key_layer,
1153
+ value_layer,
1154
+ indices_q,
1155
+ (cu_seqlens_q, cu_seqlens_k),
1156
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
1157
+ )
1158
+
1159
+
1160
# Maps `config._attn_implementation` to the perceiver cross-attention class to instantiate.
IDEFICS2_PERCEIVER_ATTENTION_CLASSES = {
    "eager": Idefics2PerceiverAttention,
    "flash_attention_2": Idefics2PerceiverFlashAttention2,
}
1164
+
1165
+
1166
class Idefics2PerceiverLayer(nn.Module):
    """One perceiver resampler block: latent/context cross-attention followed by an MLP, each sub-block
    using a pre-norm and a residual connection."""

    def __init__(self, config, layer_idx: int):
        super().__init__()
        text_cfg = config.text_config
        perceiver_cfg = config.perceiver_config

        self.hidden_size = text_cfg.hidden_size
        self.n_latents = perceiver_cfg.resampler_n_latents
        self.depth = perceiver_cfg.resampler_depth
        self.rms_norm_eps = text_cfg.rms_norm_eps

        self.input_latents_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
        self.input_context_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
        self.self_attn = IDEFICS2_PERCEIVER_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
        self.post_attention_layernorm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
        self.mlp = Idefics2MLP(
            hidden_size=text_cfg.hidden_size,
            intermediate_size=text_cfg.hidden_size * 4,
            output_size=text_cfg.hidden_size,
            hidden_act=perceiver_cfg.hidden_act,
        )

    def forward(
        self,
        latents: torch.Tensor,
        context: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        # Cross-attention sub-block (pre-norm + residual).
        residual = latents
        normed_latents = self.input_latents_norm(latents)
        normed_context = self.input_context_norm(context)
        attn_out, self_attn_weights, present_key_value = self.self_attn(
            latents=normed_latents,
            context=normed_context,
            attention_mask=attention_mask,
        )
        latents = residual + attn_out

        # Feed-forward sub-block (pre-norm + residual).
        residual = latents
        latents = residual + self.mlp(self.post_attention_layernorm(residual))

        outputs = (latents,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
1236
+
1237
+
1238
class Idefics2PerceiverResampler(nn.Module):
    def __init__(self, config) -> None:
        """
        Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
        MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then
        returns a Tensor of shape [bsz, n_latents, embed_dim]. The Resampler acts as a form of learned pooling and
        is derived from [Perceiver: General Perception with Iterative Attention](https://arxiv.org/abs/2103.03206).
        """
        super().__init__()
        self.hidden_size = config.text_config.hidden_size
        self.hidden_act = config.perceiver_config.hidden_act
        self.n_latents = config.perceiver_config.resampler_n_latents
        self.depth = config.perceiver_config.resampler_depth
        self.rms_norm_eps = config.text_config.rms_norm_eps

        # Learnable latent queries shared across the batch.
        self.latents = nn.Parameter(torch.ones(self.n_latents, self.hidden_size))

        # Stack of perceiver cross-attention blocks plus a final norm.
        self.layers = nn.ModuleList([Idefics2PerceiverLayer(config, idx) for idx in range(self.depth)])
        self.norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)

        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def forward(
        self,
        context: torch.Tensor,
        attention_mask,
    ) -> torch.Tensor:
        # seq embed -> bsz seq embed
        batch_size = context.shape[0]
        latents = self.latents.unsqueeze(0).expand((batch_size, *self.latents.size()))

        # Latent positions are never padded, so the context mask is extended with ones.
        latent_attention_mask = torch.ones(
            (attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device
        )
        attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1)
        if not self._use_flash_attention_2:
            # Eager attention needs the 4D additive mask; flash attention consumes the 2D padding mask as-is.
            attention_mask = _prepare_4d_attention_mask(attention_mask, latents.dtype, tgt_len=self.n_latents)

        compressed_context = latents
        for perceiver_layer in self.layers:
            compressed_context = perceiver_layer(
                compressed_context,
                context,
                attention_mask=attention_mask,
                position_ids=None,
                past_key_value=None,
                output_attentions=False,
                use_cache=False,
            )[0]

        return self.norm(compressed_context)
1297
+
1298
+
1299
class Idefics2Connector(nn.Module):
    """Bridges the vision encoder and the language model: projects vision features into the text
    embedding space, then compresses them with the perceiver resampler."""

    def __init__(self, config):
        super().__init__()
        self.modality_projection = Idefics2MLP(
            hidden_size=config.vision_config.hidden_size,
            intermediate_size=config.text_config.intermediate_size,
            output_size=config.text_config.hidden_size,
            hidden_act=config.text_config.hidden_act,
        )
        self.perceiver_resampler = Idefics2PerceiverResampler(config)

    def forward(self, image_hidden_states, attention_mask):
        # Project into the LM hidden size, then pool down to `resampler_n_latents` tokens per image.
        projected = self.modality_projection(image_hidden_states)
        return self.perceiver_resampler(context=projected, attention_mask=attention_mask)
1314
+
1315
+
1316
# Shared class-level docstring injected into model classes via `add_start_docstrings`.
IDEFICS2_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`Idefics2Config`] or [`Idefics2VisionConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
1331
+
1332
+
1333
@add_start_docstrings(
    "The bare Idefics2 Model outputting raw hidden-states without any specific head on top.",
    IDEFICS2_START_DOCSTRING,
)
class Idefics2PreTrainedModel(PreTrainedModel):
    config_class = Idefics2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Idefics2VisionAttention", "Idefics2MLP", "Idefics2PerceiverLayer", "Idefics2DecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_cache_class = True

    def _init_weights(self, module):
        """Initialize module weights with a normal distribution of std `initializer_range`.

        Important: this ported version of Idefics2 isn't meant for training from scratch - only
        inference and fine-tuning - so the proper init weights code has been removed.
        """
        # Bug fix: the original conditional read `self.config.text_config.initializer_range` in *both*
        # branches, making the `hasattr(self.config, "initializer_range")` check a no-op. Prefer the
        # top-level `initializer_range` when the config defines one, falling back to the text config.
        std = (
            self.config.initializer_range
            if hasattr(self.config, "initializer_range")
            else self.config.text_config.initializer_range
        )

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero.
                module.weight.data[module.padding_idx].zero_()

    @classmethod
    def _autoset_attn_implementation(
        cls,
        config,
        use_flash_attention_2: bool = False,
        torch_dtype: Optional[torch.dtype] = None,
        device_map: Optional[Union[str, Dict[str, int]]] = None,
        check_device_map: bool = True,
        **kwargs,
    ):
        """
        Overrides the method in `PreTrainedModel` to update the vision config with the correct attention implementation
        """
        config = super()._autoset_attn_implementation(
            config=config,
            use_flash_attention_2=use_flash_attention_2,
            torch_dtype=torch_dtype,
            device_map=device_map,
            check_device_map=check_device_map,
            **kwargs,
        )
        # The vision tower is built from `config.vision_config`, so mirror the resolved implementation there.
        config.vision_config._attn_implementation = config._attn_implementation
        return config
1391
+
1392
+
1393
# Shared forward-signature docstring injected via `add_start_docstrings_to_model_forward`.
IDEFICS2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):
            The tensors corresponding to the input images. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([]`LlavaProcessor`] uses
            [`CLIPImageProcessor`] for processing images).
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection and perceiver resampling.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
1461
+
1462
+
1463
@add_start_docstrings(
    """Idefics2 model consisting of a SIGLIP vision encoder and Mistral language decoder""",
    IDEFICS2_START_DOCSTRING,
)
class Idefics2Model(Idefics2PreTrainedModel):
    def __init__(self, config: Idefics2Config):
        super().__init__(config)
        self.padding_idx = self.config.text_config.pad_token_id
        self.vocab_size = self.config.text_config.vocab_size

        # Vision tower -> connector (projection + perceiver resampling) -> text decoder.
        self.vision_model = Idefics2VisionTransformer(config.vision_config)
        self.connector = Idefics2Connector(config)
        self.text_model = AutoModel.from_config(config.text_config, attn_implementation=config._attn_implementation)

        # Each image is compressed to `resampler_n_latents` tokens in the text sequence.
        self.image_seq_len = config.perceiver_config.resampler_n_latents
        self.image_token_id = self.config.image_token_id

        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

        self.post_init()

    def enable_input_require_grads(self):
        """
        Enables the gradients for the input embeddings.

        This is useful for lora when using gradient checkpointing.
        c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032

        Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
        """

        def get_lowest_module(module):
            # Walk down the first-child chain until a leaf module is reached.
            if len(list(module.children())) == 0:
                # If the module has no children, it is a leaf module (e.g., Linear, Conv2d, etc.)
                return module
            else:
                # Recursively call the function on each child module
                return get_lowest_module(list(module.children())[0])

        def make_inputs_require_grads(module, input, output):
            output.requires_grad_(True)

        self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
        self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook(
            make_inputs_require_grads
        )

    def get_input_embeddings(self):
        # Delegate to the wrapped text decoder.
        return self.text_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.text_model.set_input_embeddings(value)

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
        # Resize on the text model, then keep the config's vocab size in sync with the new embedding table.
        model_embeds = self.text_model.resize_token_embeddings(
            new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of
        )
        self.config.text_config.vocab_size = model_embeds.num_embeddings
        return model_embeds

    def inputs_merger(
        self,
        input_ids: torch.LongTensor,
        inputs_embeds: Optional[torch.Tensor],
        image_hidden_states: Optional[torch.Tensor],
    ):
        """
        This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
        The merging happens as follows:
        - The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
        - We get the image hidden states for the image through the vision encoder (and potentially the perceiver), and that hidden state is then projected into the text embedding space.
        We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
        - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_toke_around_image vector_tok_4`. That sequence is fed to the LM.
        - To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states.
        """
        # NOTE(review): this assumes the number of <image> tokens in `input_ids` equals
        # num_images * image_seq_len — verify upstream processing guarantees this.
        num_images, _, vision_hidden_size = image_hidden_states.shape
        special_image_token_mask = input_ids == self.image_token_id
        new_inputs_embeds = inputs_embeds.clone()
        reshaped_image_hidden_states = image_hidden_states.view(-1, vision_hidden_size)
        new_inputs_embeds[special_image_token_mask] = reshaped_image_hidden_states
        return new_inputs_embeds

    @add_start_docstrings_to_model_forward(
        """
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.

        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """,
        IDEFICS2_INPUTS_DOCSTRING,
    )
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.BoolTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Idefics2BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.training and self.text_model.gradient_checkpointing and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        # retrieve input_ids and inputs_embeds
        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        past_seen_tokens = 0
        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):  # kept for BC (non `Cache` `past_key_values` inputs)
            return_legacy_cache = True
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_seen_tokens = past_key_values.get_usable_length(seq_length)

        if inputs_embeds is not None and input_ids is None and past_seen_tokens == 0:
            raise ValueError("When first calling the model, if input_embeds are passed, input_ids should not be None.")

        if inputs_embeds is None:
            inputs_embeds = self.text_model.get_input_embeddings()(input_ids)

        # START VISUAL INPUTS INTEGRATION
        if pixel_values is not None and image_hidden_states is not None:
            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
        elif pixel_values is not None:
            batch_size, num_images, num_channels, height, width = pixel_values.shape
            pixel_values = pixel_values.to(dtype=self.dtype)  # fp16 compatibility
            pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])

            # Remove padding images - padding images are full 0.
            nb_values_per_image = pixel_values.shape[1:].numel()
            real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
            pixel_values = pixel_values[real_images_inds].contiguous()

            # Handle the vision attention mask
            if pixel_attention_mask is None:
                # No mask given: assume every pixel of the retained images is valid.
                pixel_attention_mask = torch.ones(
                    size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)),
                    dtype=torch.bool,
                    device=pixel_values.device,
                )
            else:
                # Remove padding images from the mask
                pixel_attention_mask = pixel_attention_mask.view(
                    batch_size * num_images, *pixel_attention_mask.shape[2:]
                )
                pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()

            # A patch is attended to if any of its pixels is non-padded.
            patch_size = self.config.vision_config.patch_size
            patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
            patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
            patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

            # Get sequence from the vision encoder
            image_hidden_states = self.vision_model(
                pixel_values=pixel_values,
                patch_attention_mask=patch_attention_mask,
            ).last_hidden_state

            # Modality projection & resampling
            image_hidden_states = self.connector(
                image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1)
            )

        elif image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)

        if past_seen_tokens == 0 and inputs_embeds is not None and image_hidden_states is not None:
            # When we generate, we don't want to replace the potential image_token_id that we generated by images
            # that simply don't exist
            inputs_embeds = self.inputs_merger(
                input_ids=input_ids,
                inputs_embeds=inputs_embeds,
                image_hidden_states=image_hidden_states,
            )

        # NOTE(review): `use_cache` is not forwarded to the text model here — confirm the decoder
        # picks it up from `past_key_values`/its config, or this silently ignores the caller's value.
        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if return_legacy_cache:
            # Convert the DynamicCache back to the legacy tuple format the caller passed in.
            outputs.past_key_values = outputs.past_key_values.to_legacy_cache()

        if not return_dict:
            return tuple(v for v in [*outputs, image_hidden_states] if v is not None)

        return Idefics2BaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_hidden_states,
        )
1685
+
1686
+
1687
@add_start_docstrings(
    """The Idefics2 Model with a language modeling head. It is made up a SigLIP vision encoder, with a language modeling head on top. """,
    IDEFICS2_START_DOCSTRING,
)
class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel):
    # The LM head may share weights with the input embeddings (see `tie_weights`).
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        # Backbone wrapping the vision encoder, the connector and the text model.
        self.model = Idefics2Model(config)
        self.image_token_id = self.config.image_token_id

        # Language-modeling head projecting text hidden states to vocabulary logits.
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.vocab_size = config.text_config.vocab_size

        # Initialize weights and apply final processing
        self.post_init()

    def enable_input_require_grads(self):
        """
        Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
        the model weights fixed.
        """

        def make_inputs_require_grads(module, input, output):
            output.requires_grad_(True)

        # Hook the input embeddings of BOTH modalities so gradients can flow back
        # to text and vision inputs even when the backbone weights are frozen.
        self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
        self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook(
            make_inputs_require_grads
        )

    def get_input_embeddings(self):
        # Input token embeddings live on the wrapped text model.
        return self.model.text_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.text_model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
        """Resize the token embeddings and keep the text config/vocab size in sync."""
        # model_embeds = self.model.resize_token_embeddings(new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of)
        model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
        if new_num_tokens is None and pad_to_multiple_of is None:
            # Nothing to update: the call was only used to fetch the embeddings.
            return model_embeds

        # Update base model and current model config
        # Ignore copy
        self.config.text_config.vocab_size = model_embeds.weight.shape[0]
        self.vocab_size = self.config.text_config.vocab_size

        # Tie weights again if needed
        self.tie_weights()

        return model_embeds

    def tie_weights(self):
        """
        Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
        """
        output_embeddings = self.get_output_embeddings()
        input_embeddings = self.get_input_embeddings()

        # Share the LM head weight with the input embedding matrix when configured.
        if getattr(self.config, "tie_word_embeddings", True):
            output_embeddings.weight = input_embeddings.weight

    @add_start_docstrings_to_model_forward(IDEFICS2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Idefics2CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.BoolTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Idefics2CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics2ForConditionalGeneration`).
                Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
                computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:

        Example:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, AutoModelForVision2Seq
        >>> from transformers.image_utils import load_image

        >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
        >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
        >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

        >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-base")
        >>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/idefics2-8b-base", device_map="auto")

        >>> BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
        >>> EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]

        >>> # Create inputs
        >>> prompts = [
        ...   "<image>In this image, we can see the city of New York, and more specifically the Statue of Liberty.<image>In this image,",
        ...   "In which city is that bridge located?<image>",
        ... ]
        >>> images = [[image1, image2], [image3]]
        >>> inputs = processor(text=prompts, padding=True, return_tensors="pt").to("cuda")

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=20)
        >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        >>> print(generated_texts)
        ['In this image, we can see the city of New York, and more specifically the Statue of Liberty. In this image, we can see the city of New York, and more specifically the Statue of Liberty.\n\n', 'In which city is that bridge located?\n\nThe bridge is located in the city of Pittsburgh, Pennsylvania.\n\n\nThe bridge is']
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            pixel_attention_mask=pixel_attention_mask,
            image_hidden_states=image_hidden_states,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        # Cast logits to float32 before the loss for numerical stability.
        logits = logits.float()

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            # Shift so that tokens < n predict n
            if attention_mask is not None:
                # Select only the non-padded positions (per the shifted attention mask).
                shift_attention_mask = attention_mask[..., 1:].to(logits.device)
                shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
                shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()
            else:
                shift_logits = logits[..., :-1, :].contiguous()
                shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens; positions labeled with the image token id are
            # excluded from the loss via `ignore_index`.
            loss_fct = CrossEntropyLoss(ignore_index=self.image_token_id)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return Idefics2CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Trim inputs to the unprocessed suffix and wire generation kwargs for one decoding step."""
        # Omit tokens covered by past_key_values
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                # Legacy tuple cache: length is the key tensor's sequence dimension.
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        image_hidden_states = kwargs.get("image_hidden_states", None)
        if image_hidden_states is not None:
            # Vision features were already computed: don't re-encode the pixels.
            pixel_values = None
            pixel_attention_mask = None
        else:
            pixel_values = kwargs.get("pixel_values", None)
            pixel_attention_mask = kwargs.get("pixel_attention_mask", None)
        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "pixel_values": pixel_values,
                "pixel_attention_mask": pixel_attention_mask,
                "image_hidden_states": image_hidden_states,
            }
        )
        return model_inputs

    def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs):
        """Propagate the cached image features between decoding steps."""
        model_kwargs = super()._update_model_kwargs_for_generation(
            outputs=outputs,
            model_kwargs=model_kwargs,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
        # Get the precomputed image_hidden_states
        model_kwargs["image_hidden_states"] = outputs.image_hidden_states
        return model_kwargs

    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
    def _reorder_cache(past_key_values, beam_idx):
        # Re-index every cached key/value tensor along the batch dim for beam search.
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
parrot/lib/python3.10/site-packages/transformers/models/idefics2/processing_idefics2.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for IDEFICS2.
17
+ """
18
+
19
+ from typing import TYPE_CHECKING, Dict, List, Optional, Union
20
+
21
+ from ...feature_extraction_utils import BatchFeature
22
+ from ...image_utils import ImageInput, is_valid_image, load_image
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...tokenization_utils_base import AddedToken, BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
25
+ from ...utils import TensorType, logging
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ from ...pipelines.conversational import Conversation
30
+ from ...tokenization_utils_base import PreTokenizedInput
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
def is_url(val) -> bool:
    """Return True when `val` is a string beginning with "http" (covers https too)."""
    if not isinstance(val, str):
        return False
    return val.startswith("http")
38
+
39
+
40
def is_image_or_image_url(elem):
    """Return True when `elem` is either an image URL string or a valid image object."""
    if is_url(elem):
        return True
    return is_valid_image(elem)
42
+
43
+
44
class Idefics2Processor(ProcessorMixin):
    r"""
    Constructs a IDEFICS2 processor which wraps a LLama tokenizer and IDEFICS2 image processor into a single processor.

    [`IdeficsProcessor`] offers all the functionalities of [`Idefics2ImageProcessor`] and [`LlamaTokenizerFast`]. See
    the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.

    Args:
        image_processor (`Idefics2ImageProcessor`):
            An instance of [`Idefics2ImageProcessor`]. The image processor is a required input.
        tokenizer (`PreTrainedTokenizerBase`, *optional*):
            An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
        image_seq_len (`int`, *optional*, defaults to 64):
            The length of the image sequence i.e. the number of <image> tokens per image in the input.
            This parameter is used to build the string from the input prompt and image tokens and should match the
            config.perceiver_config.resampler_n_latents value for the model used.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Idefics2ImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer=None, image_seq_len: int = 64, **kwargs):
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        # Special tokens used to mark and delimit images inside the text stream.
        self.fake_image_token = AddedToken("<fake_token_around_image>", normalized=False, special=True)
        self.image_token = AddedToken("<image>", normalized=False, special=True)
        self.end_of_utterance_token = AddedToken("<end_of_utterance>", normalized=False, special=True)
        self.image_seq_len = image_seq_len

        # Register the special tokens on the tokenizer so they survive encoding/decoding.
        tokens_to_add = {
            "additional_special_tokens": [self.fake_image_token, self.image_token, self.end_of_utterance_token]
        }
        tokenizer.add_special_tokens(tokens_to_add)

        # Stores a Jinja template that formats chat histories into tokenizable strings
        self.chat_template = kwargs.pop("chat_template", None)

        super().__init__(image_processor, tokenizer)

    def _extract_images_from_prompts(self, prompts):
        """Collect the image elements of each prompt, loading URL strings into images."""
        prompt_images = []
        for prompt in prompts:
            images = []
            for elem in prompt:
                if is_valid_image(elem):
                    images.append(elem)
                elif is_url(elem):
                    images.append(load_image(elem))
            prompt_images.append(images)
        return prompt_images

    def __call__(
        self,
        text: Union[TextInput, "PreTokenizedInput", List[TextInput], List["PreTokenizedInput"]] = None,
        images: Union[ImageInput, List[ImageInput], List[List[ImageInput]]] = None,
        image_seq_len: Optional[int] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        is_split_into_words: bool = False,
        add_special_tokens: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchEncoding:
        """
        Processes the input prompts and returns a BatchEncoding.

        Example:

        ```python
        >>> import requests
        >>> from transformers import Idefics2Processor
        >>> from transformers.image_utils import load_image

        >>> processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2)
        >>> processor.image_processor.do_image_splitting = False  # Force as False to simplify the example

        >>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
        >>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"

        >>> image1, image2 = load_image(url1), load_image(url2)
        >>> images = [[image1], [image2]]

        >>> text = [
        ...     "<image>In this image, we see",
        ...     "bla bla bla<image>",
        ... ]
        >>> outputs = processor(text=text, images=images, return_tensors="pt", padding=True)
        >>> input_ids = outputs.input_ids
        >>> input_tokens = processor.tokenizer.batch_decode(input_ids)
        >>> print(input_tokens)
        ['<s><fake_token_around_image><image><image><fake_token_around_image> In this image, we see', '<s> bla bla bla<fake_token_around_image><image><image><fake_token_around_image>']
        ```

        Args:
            text (`Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).

                Wherever an image token, `<image>` is encountered it is expanded to
                `<fake_token_around_image>` + `<image>` * `image_seq_len` * <fake_token_around_image>`.
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. If is of type `List[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
            image_seq_len (`int`, *optional*):
                The length of the image sequence. If not provided, the default value is used.
            padding (`Union[bool, str, PaddingStrategy]`, *optional*, defaults to `False`):
                Padding strategy applied to the input ids. See [`PreTrainedTokenizerFast.pad`] for more information.
            truncation (`Union[bool, str, TruncationStrategy]`, *optional*):
                Truncation strategy applied to the input ids. See [`PreTrainedTokenizerFast.truncate`] for more information.
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding/truncation length. See
                [`PreTrainedTokenizerFast.__call__`] for more information.
            is_split_into_words (`bool`, *optional*, defaults to `False`):
                Whether the input text is split into words or not. If set to `True`, the tokenizer will skip the
                tokenization process and assume the input is already tokenized.
            add_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether to add special tokens or not. See [`PreTrainedTokenizerFast.__call__`] for more information.
            return_tensors (`Union[str, TensorType]`, *optional*):
                If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
                information.
        """
        image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len

        n_images_in_text = []
        inputs = BatchFeature()

        if text is not None:
            if isinstance(text, str):
                text = [text]
            # NOTE(review): this condition only raises when `text` is neither a list
            # nor indexable-to-str; a list with non-str elements slips through.
            # Possibly `or` was intended instead of `and` — kept as-is to preserve behavior.
            elif not isinstance(text, list) and not isinstance(text[0], str):
                raise ValueError("Invalid input text. Please provide a string, or a list of strings")

            # Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
            fake_image_token = self.fake_image_token.content
            image_token = self.image_token.content
            image_str = f"{fake_image_token}{image_token * image_seq_len}{fake_image_token}"

            if self.image_processor.do_image_splitting:
                # A single image token is split into 4 patches + 1 original image
                image_str = image_str * 5

            prompt_strings = []
            for sample in text:
                # Count images per prompt BEFORE expansion, to later validate
                # against the number of images actually provided.
                n_images_in_text.append(sample.count(image_token))
                sample = sample.replace(image_token, image_str)
                # Remove any double fake tokens if images are adjacent
                sample = sample.replace(f"{fake_image_token}{fake_image_token}", f"{fake_image_token}")
                prompt_strings.append(sample)

            text_inputs = self.tokenizer(
                text=prompt_strings,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                is_split_into_words=is_split_into_words,
                return_tensors=return_tensors,
            )
            inputs.update(text_inputs)

        if images is not None:
            # Normalize `images` to a batch: List[List[image]] (one inner list per prompt).
            if is_image_or_image_url(images):
                images = [[images]]
            elif isinstance(images, list) and is_image_or_image_url(images[0]):
                images = [images]
            # NOTE(review): as written, this branch can only trigger for non-list
            # inputs that survived the checks above; a malformed nested list is not
            # rejected here. Likely `or` was intended — kept as-is to preserve behavior.
            elif (
                not isinstance(images, list)
                and not isinstance(images[0], list)
                and not is_image_or_image_url(images[0][0])
            ):
                raise ValueError(
                    "Invalid input images. Please provide a single image or a list of images or a list of list of images."
                )

            # Each prompt must reference exactly as many <image> tokens as images supplied.
            n_images_in_images = [len(sample) for sample in images]
            if text is not None and not n_images_in_images == n_images_in_text:
                raise ValueError(
                    f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
                )

            # Load images if they are URLs
            images = [[load_image(im) for im in sample] for sample in images]
            image_inputs = self.image_processor(images, return_tensors=return_tensors)
            inputs.update(image_inputs)

        return inputs

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of the tokenizer's and image processor's input names,
        # preserving order of first appearance.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def apply_chat_template(
        self,
        conversation: Union[List[Dict[str, str]], "Conversation"],
        chat_template: Optional[str] = None,
        tokenize: bool = False,
        **kwargs,
    ) -> str:
        """
        Overrides the tokenizer's `apply_chat_template` method to apply the IDEFICS2 chat template by default
        if no chat template is provided.

        By default, the output isn't tokenized. This is because the IDEFICS2 chat template is designed to insert
        the image token <image> into the sequence according to the message, but does not handle expanding the image
        tokens to the sequence length or adding the surrounding tokens e.g. <fake_image_token>.

        Args:
            conversation (`Union[List[Dict, str, str], "Conversation"]`):
                The conversation to format.
            chat_template (`Optional[str]`, *optional*):
                The Jinja template to use for formatting the conversation. If not provided, the default chat template
                is used.
            tokenize (`bool`, *optional*, defaults to `False`):
                Whether to tokenize the output or not.
            **kwargs:
                Additional keyword arguments for the tokenizer's `apply_chat_template` method.
        """

        if chat_template is None:
            # Prefer the template saved with the processor; fall back to the
            # class-level default (deprecated) with a one-time warning.
            if self.chat_template is not None:
                chat_template = self.chat_template
            else:
                logger.warning_once(
                    "No chat template is set for this processor, falling back to a default class-level template. This is "
                    "very error-prone, because models are often trained with templates different from the class default! "
                    "Default chat templates are a legacy feature and will be removed in Transformers v4.43, at which "
                    "point any code depending on them will stop working. We recommend setting a valid chat template before "
                    "then to ensure that this model continues working without issues."
                )
                chat_template = self.default_chat_template
        return self.tokenizer.apply_chat_template(
            conversation, chat_template=chat_template, tokenize=tokenize, **kwargs
        )

    @property
    def default_chat_template(self):
        """
        This template formats inputs in the form of a chat history. For each message in the chat history:
        * the template will output the role of the speaker followed by the content of the message.
        * content can be a single string or a list of strings and images.
        * If the content element is an image, the template will output a sequence of <image> tokens and <fake_token_around_image> token before and after each image
        * The template will output an <end_of_utterance> token at the end of each message.

        Example:

        ```python
        messages = [{
            "role": "user",
            "content": [
                {"type": "text", "text": "What’s in this image?"},
                {"type": "image"},
                {"type": "image"},
                ],
        },
        {
            "role": "assistant",
            "content": [{"type": "text", "text": "This picture depicts Idefix, the dog of Obelix in Asterix and Obelix. Idefix is running on the ground."},]
        }]
        ```

        Will create outputs like:
        ```
        User: What is in this Image?<image><image><end_of_utterance>
        Assistant: This picture depicts Idefix, the dog of Obelix in Asterix and Obelix. Idefix is running on the ground.<end_of_utterance>
        ```
        """
        # fmt: off
        return (
            "{% for message in messages %}"
                "{{message['role'].capitalize()}}"
                "{% if message['content'][0]['type'] == 'image' %}"
                    "{{':'}}"
                "{% else %}"
                    "{{': '}}"
                "{% endif %}"
                "{% for line in message['content'] %}"
                    "{% if line['type'] == 'text' %}"
                        "{{line['text']}}"
                    "{% elif line['type'] == 'image' %}"
                        "{{ '<image>' }}"
                    "{% endif %}"
                "{% endfor %}"
                "<end_of_utterance>\n"
            "{% endfor %}"

            "{% if add_generation_prompt %}"
                "{{ 'Assistant:' }}"
            "{% endif %}"
        )
        # fmt: on
parrot/lib/python3.10/site-packages/transformers/models/mpnet/configuration_mpnet.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ MPNet model configuration"""
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
class MPNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MPNetModel`] or a [`TFMPNetModel`]. It is used to
    instantiate a MPNet model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MPNet
    [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30527):
            Vocabulary size of the MPNet model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MPNetModel`] or [`TFMPNetModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.

    Examples:

    ```python
    >>> from transformers import MPNetModel, MPNetConfig

    >>> # Initializing a MPNet mpnet-base style configuration
    >>> configuration = MPNetConfig()

    >>> # Initializing a model from the mpnet-base style configuration
    >>> model = MPNetModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mpnet"

    def __init__(
        self,
        vocab_size=30527,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        relative_attention_num_buckets=32,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Special-token ids and any remaining kwargs are handled by the base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Record every architecture hyper-parameter on the instance, in the same
        # order the reference implementation assigns them.
        for attribute_name, attribute_value in (
            ("vocab_size", vocab_size),
            ("hidden_size", hidden_size),
            ("num_hidden_layers", num_hidden_layers),
            ("num_attention_heads", num_attention_heads),
            ("hidden_act", hidden_act),
            ("intermediate_size", intermediate_size),
            ("hidden_dropout_prob", hidden_dropout_prob),
            ("attention_probs_dropout_prob", attention_probs_dropout_prob),
            ("max_position_embeddings", max_position_embeddings),
            ("initializer_range", initializer_range),
            ("layer_norm_eps", layer_norm_eps),
            ("relative_attention_num_buckets", relative_attention_num_buckets),
        ):
            setattr(self, attribute_name, attribute_value)
parrot/lib/python3.10/site-packages/transformers/models/nllb/__init__.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Map of submodule name -> public names it exports, consumed by `_LazyModule`.
# Each optional tokenizer backend is registered only when its dependency
# (sentencepiece / tokenizers) is importable.
_import_structure = {}

if is_sentencepiece_available():
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

if is_tokenizers_available():
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    # Static type checkers see the real imports, gated on the same
    # availability checks used to build `_import_structure`.
    if is_sentencepiece_available():
        from .tokenization_nllb import NllbTokenizer

    if is_tokenizers_available():
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    # At runtime, defer all submodule imports until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
parrot/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (920 Bytes). View file
 
parrot/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc ADDED
Binary file (18.2 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb.py ADDED
@@ -0,0 +1,433 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
23
+ from ...utils import logging
24
+
25
+
26
# Module-level logger following the library-wide convention.
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker; replaced by a space when detokenizing.
SPIECE_UNDERLINE = "▁"

# File name expected by `save_vocabulary` / `from_pretrained` for the SentencePiece model.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}


# The 202 FLORES-200 language codes (ISO 639-3 + script) used by NLLB as language
# special tokens; registered as `additional_special_tokens` by default.
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']  # fmt: skip
34
+
35
+
36
class NllbTokenizer(PreTrainedTokenizer):
    """
    Construct an NLLB tokenizer.

    Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
    <tokens> <eos>` for target language documents.

    Examples:

    ```python
    >>> from transformers import NllbTokenizer

    >>> tokenizer = NllbTokenizer.from_pretrained(
    ...     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    ... )
    >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
    >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
    >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
    ```

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenizer_file (`str`, *optional*):
            The path to a tokenizer file to use instead of the vocab file.
        src_lang (`str`, *optional*):
            The language to use as source language for translation.
        tgt_lang (`str`, *optional*):
            The language to use as target language for translation.
        sp_model_kwargs (`Dict[str, str]`):
            Additional keyword arguments to pass to the model initialization.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    # Special-token ids prepended / appended around every encoded sequence; set by
    # `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens`.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        if additional_special_tokens is None:
            additional_special_tokens = FAIRSEQ_LANGUAGE_CODES
        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, normalized=True, lstrip=True, special=True)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # unk token needs to be in the vocab with correct index
        self._added_tokens_decoder = {0: bos_token, 1: pad_token, 2: eos_token, 3: unk_token}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)

        # Everything that follows is kept for BC and will be removed in v4.38
        self._fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        language_codes = FAIRSEQ_LANGUAGE_CODES if additional_special_tokens is None else additional_special_tokens
        self._lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(language_codes)
        }
        self._id_to_lang_code = {v: k for k, v in self._lang_code_to_id.items()}
        self._fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self._fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self._fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable: replace it with its
        # serialized proto so `__setstate__` can rebuild it.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        # +1 accounts for the fairseq offset (see the alignment table in `__init__`).
        return len(self.sp_model) + self.fairseq_offset

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @property
    def lang_code_to_id(self):
        logger.warning_once(
            "the `lang_code_to_id` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
            " this attribute will be removed in `transformers` v4.38"
        )
        return self._lang_code_to_id

    @property
    def fairseq_tokens_to_ids(self):
        logger.warning_once(
            "the `fairseq_tokens_to_ids` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
            " this attribute will be removed in `transformers` v4.38"
        )
        return self._fairseq_tokens_to_ids

    @property
    def id_to_lang_code(self):
        logger.warning_once(
            "the `id_to_lang_code` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
            " this attribute will be removed in `transformers` v4.38"
        )
        return self._id_to_lang_code

    @property
    def fairseq_ids_to_tokens(self):
        logger.warning_once(
            "the `fairseq_ids_to_tokens` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
            " this attribute will be removed in `transformers` v4.38"
        )
        return self._fairseq_ids_to_tokens

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.

        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into `save_directory`; returns the written path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The original file is gone (e.g. loaded from serialized proto): dump the in-memory model.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: No prefix and suffix=[eos, src_lang_code].
        - In default mode: Prefix=[src_lang_code], suffix = [eos]
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
        - In default mode: Prefix=[tgt_lang_code], suffix = [eos]
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
parrot/lib/python3.10/site-packages/transformers/models/patchtst/__init__.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it exports, consumed by `_LazyModule`.
# The configuration is always importable; the modeling code needs torch.
_import_structure = {
    "configuration_patchtst": ["PatchTSTConfig"],
}

if is_torch_available():
    _import_structure["modeling_patchtst"] = [
        "PatchTSTModel",
        "PatchTSTPreTrainedModel",
        "PatchTSTForPrediction",
        "PatchTSTForPretraining",
        "PatchTSTForRegression",
        "PatchTSTForClassification",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports, gated on the same
    # availability check used to build `_import_structure`.
    from .configuration_patchtst import PatchTSTConfig

    if is_torch_available():
        from .modeling_patchtst import (
            PatchTSTForClassification,
            PatchTSTForPrediction,
            PatchTSTForPretraining,
            PatchTSTForRegression,
            PatchTSTModel,
            PatchTSTPreTrainedModel,
        )

else:
    import sys

    # At runtime, defer all submodule imports until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
parrot/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (942 Bytes). View file
 
parrot/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/configuration_patchtst.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/modeling_patchtst.cpython-310.pyc ADDED
Binary file (65 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/models/patchtst/configuration_patchtst.py ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
"""PatchTST model configuration"""

from typing import List, Optional, Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


# Module-level logger, following the transformers convention.
logger = logging.get_logger(__name__)
24
+
25
+
26
class PatchTSTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PatchTSTModel`]. It is used to instantiate a
    PatchTST model according to the specified arguments, defining the model architecture.
    [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_input_channels (`int`, *optional*, defaults to 1):
            The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
            multivariate targets.
        context_length (`int`, *optional*, defaults to 32):
            The context length of the input sequence.
        distribution_output (`str`, *optional*, defaults to `"student_t"`):
            The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
            "negative_binomial".
        loss (`str`, *optional*, defaults to `"mse"`):
            The loss function for the model corresponding to the `distribution_output` head. For parametric
            distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
            error "mse".
        patch_length (`int`, *optional*, defaults to 1):
            Define the patch length of the patchification process.
        patch_stride (`int`, *optional*, defaults to 1):
            Define the stride of the patchification process.
        num_hidden_layers (`int`, *optional*, defaults to 3):
            Number of hidden layers.
        d_model (`int`, *optional*, defaults to 128):
            Dimensionality of the transformer layers.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        share_embedding (`bool`, *optional*, defaults to `True`):
            Sharing the input embedding across all channels.
        channel_attention (`bool`, *optional*, defaults to `False`):
            Activate channel attention block in the Transformer to allow channels to attend each other.
        ffn_dim (`int`, *optional*, defaults to 512):
            Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        norm_type (`str` , *optional*, defaults to `"batchnorm"`):
            Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            A value added to the denominator for numerical stability of normalization.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention probabilities.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the Transformer.
        positional_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability in the positional embedding layer.
        path_dropout (`float`, *optional*, defaults to 0.0):
            The dropout path in the residual block.
        ff_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability used between the two layers of the feed-forward networks.
        bias (`bool`, *optional*, defaults to `True`):
            Whether to add bias in the feed-forward networks.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (string) in the Transformer.`"gelu"` and `"relu"` are supported.
        pre_norm (`bool`, *optional*, defaults to `True`):
            Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is
            applied after residual block.
        positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
            Positional encodings. Options `"random"` and `"sincos"` are supported.
        use_cls_token (`bool`, *optional*, defaults to `False`):
            Whether cls token is used.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal weight initialization distribution.
        share_projection (`bool`, *optional*, defaults to `True`):
            Sharing the projection layer across different channels in the forecast head.
        scaling (`Union`, *optional*, defaults to `"std"`):
            Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
            scaler is set to "mean".
        do_mask_input (`bool`, *optional*):
            Apply masking during the pretraining.
        mask_type (`str`, *optional*, defaults to `"random"`):
            Masking type. Only `"random"` and `"forecast"` are currently supported.
        random_mask_ratio (`float`, *optional*, defaults to 0.5):
            Masking ratio applied to mask the input data during random pretraining.
        num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`):
            Number of patches to be masked at the end of each batch sample. If it is an integer,
            all the samples in the batch will have the same number of masked patches. If it is a list,
            samples in the batch will be randomly masked by numbers defined in the list. This argument is only used
            for forecast pretraining.
        channel_consistent_masking (`bool`, *optional*, defaults to `False`):
            If channel consistent masking is True, all the channels will have the same masking pattern.
        unmasked_channel_indices (`list`, *optional*):
            Indices of channels that are not masked during pretraining. Values in the list are number between 1 and
            `num_input_channels`
        mask_value (`int`, *optional*, defaults to 0):
            Values in the masked patches will be filled by `mask_value`.
        pooling_type (`str`, *optional*, defaults to `"mean"`):
            Pooling of the embedding. `"mean"`, `"max"` and `None` are supported.
        head_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for head.
        prediction_length (`int`, *optional*, defaults to 24):
            The prediction horizon that the model will output.
        num_targets (`int`, *optional*, defaults to 1):
            Number of targets for regression and classification tasks. For classification, it is the number of
            classes.
        output_range (`list`, *optional*):
            Output range for regression task. The range of output values can be set to enforce the model to produce
            values within a range.
        num_parallel_samples (`int`, *optional*, defaults to 100):
            The number of samples is generated in parallel for probabilistic prediction.

    Example:

    ```python
    >>> from transformers import PatchTSTConfig, PatchTSTModel

    >>> # Initializing a PatchTST configuration with 12 time steps for prediction
    >>> configuration = PatchTSTConfig(prediction_length=12)

    >>> # Randomly initializing a model (with random weights) from the configuration
    >>> model = PatchTSTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "patchtst"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_attention_heads",
        "num_hidden_layers": "num_hidden_layers",
    }

    def __init__(
        self,
        # time series specific configuration
        num_input_channels: int = 1,
        context_length: int = 32,
        distribution_output: str = "student_t",
        loss: str = "mse",
        # PatchTST arguments
        patch_length: int = 1,
        patch_stride: int = 1,
        # Transformer architecture configuration
        num_hidden_layers: int = 3,
        d_model: int = 128,
        num_attention_heads: int = 4,
        share_embedding: bool = True,
        channel_attention: bool = False,
        ffn_dim: int = 512,
        norm_type: str = "batchnorm",
        norm_eps: float = 1e-05,
        attention_dropout: float = 0.0,
        dropout: float = 0.0,
        positional_dropout: float = 0.0,
        path_dropout: float = 0.0,
        ff_dropout: float = 0.0,
        bias: bool = True,
        activation_function: str = "gelu",
        pre_norm: bool = True,
        positional_encoding_type: str = "sincos",
        use_cls_token: bool = False,
        init_std: float = 0.02,
        share_projection: bool = True,
        scaling: Optional[Union[str, bool]] = "std",
        # mask pretraining
        do_mask_input: Optional[bool] = None,
        mask_type: str = "random",
        random_mask_ratio: float = 0.5,
        # NOTE(review): mutable default kept for interface compatibility with
        # existing callers/serialized configs; it is only read, never mutated.
        num_forecast_mask_patches: Optional[Union[List[int], int]] = [2],
        channel_consistent_masking: Optional[bool] = False,
        unmasked_channel_indices: Optional[List[int]] = None,
        mask_value: int = 0,
        # head
        pooling_type: str = "mean",
        head_dropout: float = 0.0,
        prediction_length: int = 24,
        num_targets: int = 1,
        output_range: Optional[List] = None,
        # distribution head
        num_parallel_samples: int = 100,
        **kwargs,
    ):
        # time series specific configuration
        self.context_length = context_length
        self.num_input_channels = num_input_channels  # n_vars
        self.loss = loss
        self.distribution_output = distribution_output
        # Shared by the distribution, prediction and regression heads
        # (fix: was previously assigned twice, once per section).
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.d_model = d_model
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.num_hidden_layers = num_hidden_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.share_embedding = share_embedding
        self.channel_attention = channel_attention
        self.norm_type = norm_type
        self.norm_eps = norm_eps
        self.positional_dropout = positional_dropout
        self.path_dropout = path_dropout
        self.ff_dropout = ff_dropout
        self.bias = bias
        self.activation_function = activation_function
        self.pre_norm = pre_norm
        self.positional_encoding_type = positional_encoding_type
        self.use_cls_token = use_cls_token
        self.init_std = init_std
        self.scaling = scaling

        # PatchTST parameters
        self.patch_length = patch_length
        self.patch_stride = patch_stride

        # Mask pretraining
        self.do_mask_input = do_mask_input
        self.mask_type = mask_type
        self.random_mask_ratio = random_mask_ratio  # for random masking
        self.num_forecast_mask_patches = num_forecast_mask_patches  # for forecast masking
        self.channel_consistent_masking = channel_consistent_masking
        self.unmasked_channel_indices = unmasked_channel_indices
        self.mask_value = mask_value

        # general head params
        self.pooling_type = pooling_type
        self.head_dropout = head_dropout

        # For prediction head
        self.share_projection = share_projection
        self.prediction_length = prediction_length

        # Regression
        self.num_targets = num_targets
        self.output_range = output_range

        super().__init__(**kwargs)