Upgraded to match latest torch and transformers (Review needed)

#119
config.json CHANGED
@@ -1,85 +1,86 @@
-{
-  "_name_or_path": "florence2",
-  "architectures": [
-    "Florence2ForConditionalGeneration"
-  ],
-  "auto_map": {
-    "AutoConfig": "configuration_florence2.Florence2Config",
-    "AutoModelForCausalLM": "modeling_florence2.Florence2ForConditionalGeneration"
-  },
-  "bos_token_id": 0,
-  "eos_token_id": 2,
-  "ignore_index": -100,
-  "model_type": "florence2",
-  "pad_token_id": 1,
-  "projection_dim": 1024,
-  "text_config": {
-    "vocab_size": 51289,
-    "activation_dropout": 0.1,
-    "activation_function": "gelu",
-    "add_bias_logits": false,
-    "add_final_layer_norm": false,
-    "attention_dropout": 0.1,
-    "bos_token_id": 0,
-    "classif_dropout": 0.1,
-    "classifier_dropout": 0.0,
-    "d_model": 1024,
-    "decoder_attention_heads": 16,
-    "decoder_ffn_dim": 4096,
-    "decoder_layerdrop": 0.0,
-    "decoder_layers": 12,
-    "decoder_start_token_id": 2,
-    "dropout": 0.1,
-    "early_stopping": true,
-    "encoder_attention_heads": 16,
-    "encoder_ffn_dim": 4096,
-    "encoder_layerdrop": 0.0,
-    "encoder_layers": 12,
-    "eos_token_id": 2,
-    "forced_eos_token_id": 2,
-    "forced_bos_token_id": 0,
-    "gradient_checkpointing": false,
-    "init_std": 0.02,
-    "is_encoder_decoder": true,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1,
-      "LABEL_2": 2
-    },
-    "max_position_embeddings": 4096,
-    "no_repeat_ngram_size": 3,
-    "normalize_before": false,
-    "num_hidden_layers": 12,
-    "pad_token_id": 1,
-    "scale_embedding": false,
-    "num_beams": 3
-  },
-  "vision_config": {
-    "model_type": "davit",
-    "drop_path_rate": 0.1,
-    "patch_size": [7, 3, 3, 3],
-    "patch_stride": [4, 2, 2, 2],
-    "patch_padding": [3, 1, 1, 1],
-    "patch_prenorm": [false, true, true, true],
-    "enable_checkpoint": false,
-    "dim_embed": [256, 512, 1024, 2048],
-    "num_heads": [8, 16, 32, 64],
-    "num_groups": [8, 16, 32, 64],
-    "depths": [1, 1, 9, 1],
-    "window_size": 12,
-    "projection_dim": 1024,
-    "visual_temporal_embedding": {
-      "type": "COSINE",
-      "max_temporal_embeddings": 100
-    },
-    "image_pos_embed": {
-      "type": "learned_abs_2d",
-      "max_pos_embeddings": 50
-    },
-    "image_feature_source": ["spatial_avg_pool", "temporal_avg_pool"]
-  },
-  "vocab_size": 51289,
-  "torch_dtype": "float16",
-  "transformers_version": "4.41.0.dev0",
-  "is_encoder_decoder": true
+{
+  "_name_or_path": "florence2",
+  "architectures": [
+    "Florence2ForConditionalGeneration"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_florence2.Florence2Config",
+    "AutoModelForCausalLM": "modeling_florence2.Florence2ForConditionalGeneration"
+  },
+  "bos_token_id": 0,
+  "eos_token_id": 2,
+  "ignore_index": -100,
+  "model_type": "florence2",
+  "pad_token_id": 1,
+  "projection_dim": 768,
+  "text_config": {
+    "vocab_size": 51289,
+    "activation_dropout": 0.1,
+    "activation_function": "gelu",
+    "add_bias_logits": false,
+    "add_final_layer_norm": false,
+    "attention_dropout": 0.1,
+    "bos_token_id": 0,
+    "classif_dropout": 0.1,
+    "classifier_dropout": 0.0,
+    "d_model": 768,
+    "decoder_attention_heads": 12,
+    "decoder_ffn_dim": 3072,
+    "decoder_layerdrop": 0.0,
+    "decoder_layers": 6,
+    "decoder_start_token_id": 2,
+    "dropout": 0.1,
+    "early_stopping": true,
+    "encoder_attention_heads": 12,
+    "encoder_ffn_dim": 3072,
+    "encoder_layerdrop": 0.0,
+    "encoder_layers": 6,
+    "eos_token_id": 2,
+    "forced_eos_token_id": 2,
+    "forced_bos_token_id": 0,
+    "gradient_checkpointing": false,
+    "init_std": 0.02,
+    "is_encoder_decoder": true,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1,
+      "LABEL_2": 2
+    },
+    "max_position_embeddings": 1024,
+    "no_repeat_ngram_size": 3,
+    "normalize_before": false,
+    "num_hidden_layers": 6,
+    "pad_token_id": 1,
+    "scale_embedding": false,
+    "num_beams": 3,
+    "tie_word_embeddings": true
+  },
+  "vision_config": {
+    "model_type": "davit",
+    "drop_path_rate": 0.1,
+    "patch_size": [7, 3, 3, 3],
+    "patch_stride": [4, 2, 2, 2],
+    "patch_padding": [3, 1, 1, 1],
+    "patch_prenorm": [false, true, true, true],
+    "enable_checkpoint": false,
+    "dim_embed": [128, 256, 512, 1024],
+    "num_heads": [4, 8, 16, 32],
+    "num_groups": [4, 8, 16, 32],
+    "depths": [1, 1, 9, 1],
+    "window_size": 12,
+    "projection_dim": 768,
+    "visual_temporal_embedding": {
+      "type": "COSINE",
+      "max_temporal_embeddings": 100
+    },
+    "image_pos_embed": {
+      "type": "learned_abs_2d",
+      "max_pos_embeddings": 50
+    },
+    "image_feature_source": ["spatial_avg_pool", "temporal_avg_pool"]
+  },
+  "vocab_size": 51289,
+  "torch_dtype": "float16",
+  "transformers_version": "4.41.0.dev0",
+  "is_encoder_decoder": true
 }
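
Reviewer note: beyond the version bump, this hunk moves the text and vision dimensions from large-variant values (`d_model` 1024, 12+12 layers, DaViT `dim_embed` up to 2048) to base-variant values (`d_model` 768, 6+6 layers, `dim_embed` up to 1024), trims `max_position_embeddings` from 4096 to 1024, and adds `tie_word_embeddings`. Note that `transformers_version` is still pinned at `4.41.0.dev0`, which may be worth bumping given the PR's compatibility claim. A minimal sketch for checking the new values from a local checkout (only the file name is assumed):

```python
# Sanity-check the updated config.json in a local checkout of this repo.
import json

with open("config.json") as f:
    cfg = json.load(f)

text = cfg["text_config"]
assert text["d_model"] == cfg["projection_dim"] == 768
assert text["encoder_layers"] == text["decoder_layers"] == 6
assert text["tie_word_embeddings"] is True
assert cfg["vision_config"]["dim_embed"] == [128, 256, 512, 1024]
print("config.json matches the base-variant dimensions")
```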
configuration_florence2.py CHANGED
@@ -1,340 +1,360 @@
-# coding=utf-8
-# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Florence-2 configuration"""
-
-import warnings
-from typing import Optional
-
-from transformers import AutoConfig
-from transformers.configuration_utils import PretrainedConfig
-from transformers.utils import logging
-
-logger = logging.get_logger(__name__)
-
-
-class Florence2VisionConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel
-    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
-    defaults will yield a similar configuration to that of the Florence2VisionModel architecture.
-
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-
-    Args:
-        drop_path_rate (`float`, *optional*, defaults to 0.1):
-            The dropout rate of the drop path layer.
-        patch_size (`List[int]`, *optional*, defaults to [7, 3, 3, 3]):
-            The patch size of the image.
-        patch_stride (`List[int]`, *optional*, defaults to [4, 2, 2, 2]):
-            The patch stride of the image.
-        patch_padding (`List[int]`, *optional*, defaults to [3, 1, 1, 1]):
-            The patch padding of the image.
-        patch_prenorm (`List[bool]`, *optional*, defaults to [False, True, True, True]):
-            Whether to apply layer normalization before the patch embedding layer.
-        enable_checkpoint (`bool`, *optional*, defaults to `False`):
-            Whether to enable gradient checkpointing.
-        dim_embed (`List[int]`, *optional*, defaults to [256, 512, 1024, 2048]):
-            The dimension of the embedding layer.
-        num_heads (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
-            The number of attention heads.
-        num_groups (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
-            The number of groups.
-        depths (`List[int]`, *optional*, defaults to [1, 1, 9, 1]):
-            The depth of the model.
-        window_size (`int`, *optional*, defaults to 12):
-            The window size of the model.
-        projection_dim (`int`, *optional*, defaults to 1024):
-            The dimension of the projection layer.
-        visual_temporal_embedding (`dict`, *optional*):
-            The configuration of the visual temporal embedding.
-        image_pos_embed (`dict`, *optional*):
-            The configuration of the image position embedding.
-        image_feature_source (`List[str]`, *optional*, defaults to ["spatial_avg_pool", "temporal_avg_pool"]):
-            The source of the image feature.
-
-    Example:
-
-    ```python
-    >>> from transformers import Florence2VisionConfig, Florence2VisionModel
-
-    >>> # Initializing a Florence2 Vision style configuration
-    >>> configuration = Florence2VisionConfig()
-
-    >>> # Initializing a model (with random weights)
-    >>> model = Florence2VisionModel(configuration)
-
-    >>> # Accessing the model configuration
-    >>> configuration = model.config
-    ```"""
-
-    model_type = "davit"
-    keys_to_ignore_at_inference = ["past_key_values"]
-
-    def __init__(
-        self,
-        drop_path_rate=0.1,
-        patch_size=[7, 3, 3, 3],
-        patch_stride=[4, 2, 2, 2],
-        patch_padding=[3, 1, 1, 1],
-        patch_prenorm=[False, True, True, True],
-        enable_checkpoint=False,
-        dim_embed=[256, 512, 1024, 2048],
-        num_heads=[8, 16, 32, 64],
-        num_groups=[8, 16, 32, 64],
-        depths=[1, 1, 9, 1],
-        window_size=12,
-        projection_dim=1024,
-        visual_temporal_embedding=None,
-        image_pos_embed=None,
-        image_feature_source=["spatial_avg_pool", "temporal_avg_pool"],
-        **kwargs,
-    ):
-        self.drop_path_rate = drop_path_rate
-        self.patch_size = patch_size
-        self.patch_stride = patch_stride
-        self.patch_padding = patch_padding
-        self.patch_prenorm = patch_prenorm
-        self.enable_checkpoint = enable_checkpoint
-        self.dim_embed = dim_embed
-        self.num_heads = num_heads
-        self.num_groups = num_groups
-        self.depths = depths
-        self.window_size = window_size
-        self.projection_dim = projection_dim
-        self.visual_temporal_embedding = visual_temporal_embedding
-        self.image_pos_embed = image_pos_embed
-        self.image_feature_source = image_feature_source
-
-        super().__init__(**kwargs)
-
-
-class Florence2LanguageConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`Florence2LanguagePreTrainedModel`]. It is used to instantiate a BART
-    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
-    defaults will yield a similar configuration to that of the BART
-    [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.
-
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-
-    Args:
-        vocab_size (`int`, *optional*, defaults to 51289):
-            Vocabulary size of the Florence2Language model. Defines the number of different tokens that can be represented by the
-            `input_ids` passed when calling [`Florence2LanguageModel`].
-        d_model (`int`, *optional*, defaults to 1024):
-            Dimensionality of the layers and the pooler layer.
-        encoder_layers (`int`, *optional*, defaults to 12):
-            Number of encoder layers.
-        decoder_layers (`int`, *optional*, defaults to 12):
-            Number of decoder layers.
-        encoder_attention_heads (`int`, *optional*, defaults to 16):
-            Number of attention heads for each attention layer in the Transformer encoder.
-        decoder_attention_heads (`int`, *optional*, defaults to 16):
-            Number of attention heads for each attention layer in the Transformer decoder.
-        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
-            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
-        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
-            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
-        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
-            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
-            `"relu"`, `"silu"` and `"gelu_new"` are supported.
-        dropout (`float`, *optional*, defaults to 0.1):
-            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-        attention_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio for the attention probabilities.
-        activation_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio for activations inside the fully connected layer.
-        classifier_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio for the classifier.
-        max_position_embeddings (`int`, *optional*, defaults to 1024):
-            The maximum sequence length that this model might ever be used with. Typically set this to something large
-            just in case (e.g., 512 or 1024 or 2048).
-        init_std (`float`, *optional*, defaults to 0.02):
-            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
-            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
-            for more details.
-        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
-            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
-            for more details.
-        scale_embedding (`bool`, *optional*, defaults to `False`):
-            Scale embeddings by dividing by sqrt(d_model).
-        use_cache (`bool`, *optional*, defaults to `True`):
-            Whether or not the model should return the last key/values attentions (not used by all models).
-        num_labels (`int`, *optional*, defaults to 3):
-            The number of labels to use in [`Florence2LanguageForSequenceClassification`].
-        forced_eos_token_id (`int`, *optional*, defaults to 2):
-            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
-            `eos_token_id`.
-
-    Example:
-
-    ```python
-    >>> from transformers import Florence2LanguageConfig, Florence2LanguageModel
-
-    >>> # Initializing a Florence2 Language style configuration
-    >>> configuration = Florence2LanguageConfig()
-
-    >>> # Initializing a model (with random weights)
-    >>> model = Florence2LanguageModel(configuration)
-
-    >>> # Accessing the model configuration
-    >>> configuration = model.config
-    ```"""
-
-    model_type = "florence2_language"
-    keys_to_ignore_at_inference = ["past_key_values"]
-    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
-
-    def __init__(
-        self,
-        vocab_size=51289,
-        max_position_embeddings=1024,
-        encoder_layers=12,
-        encoder_ffn_dim=4096,
-        encoder_attention_heads=16,
-        decoder_layers=12,
-        decoder_ffn_dim=4096,
-        decoder_attention_heads=16,
-        encoder_layerdrop=0.0,
-        decoder_layerdrop=0.0,
-        activation_function="gelu",
-        d_model=1024,
-        dropout=0.1,
-        attention_dropout=0.0,
-        activation_dropout=0.0,
-        init_std=0.02,
-        classifier_dropout=0.0,
-        scale_embedding=False,
-        use_cache=True,
-        num_labels=3,
-        pad_token_id=1,
-        bos_token_id=0,
-        eos_token_id=2,
-        is_encoder_decoder=True,
-        decoder_start_token_id=2,
-        forced_eos_token_id=2,
-        **kwargs,
-    ):
-        self.vocab_size = vocab_size
-        self.max_position_embeddings = max_position_embeddings
-        self.d_model = d_model
-        self.encoder_ffn_dim = encoder_ffn_dim
-        self.encoder_layers = encoder_layers
-        self.encoder_attention_heads = encoder_attention_heads
-        self.decoder_ffn_dim = decoder_ffn_dim
-        self.decoder_layers = decoder_layers
-        self.decoder_attention_heads = decoder_attention_heads
-        self.dropout = dropout
-        self.attention_dropout = attention_dropout
-        self.activation_dropout = activation_dropout
-        self.activation_function = activation_function
-        self.init_std = init_std
-        self.encoder_layerdrop = encoder_layerdrop
-        self.decoder_layerdrop = decoder_layerdrop
-        self.classifier_dropout = classifier_dropout
-        self.use_cache = use_cache
-        self.num_hidden_layers = encoder_layers
-        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
-
-        super().__init__(
-            num_labels=num_labels,
-            pad_token_id=pad_token_id,
-            bos_token_id=bos_token_id,
-            eos_token_id=eos_token_id,
-            is_encoder_decoder=is_encoder_decoder,
-            decoder_start_token_id=decoder_start_token_id,
-            forced_eos_token_id=forced_eos_token_id,
-            **kwargs,
-        )
-
-        # ensure backward compatibility for BART CNN models
-        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
-            self.forced_bos_token_id = self.bos_token_id
-            warnings.warn(
-                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
-                "The config can simply be saved and uploaded again to be fixed."
-            )
-
-
-class Florence2Config(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate a
-    Florence-2 model according to the specified arguments, defining the model architecture.
-
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-
-    Args:
-        vision_config (`Florence2VisionConfig`, *optional*):
-            Custom vision config or dict.
-        text_config (`Union[AutoConfig, dict]`, *optional*):
-            The config object of the text backbone.
-        ignore_index (`int`, *optional*, defaults to -100):
-            The ignore index for the loss function.
-        vocab_size (`int`, *optional*, defaults to 51289):
-            Vocabulary size of the Florence2 model. Defines the number of different tokens that can be represented by the
-            `input_ids` passed when calling [`~Florence2ForConditionalGeneration`].
-        projection_dim (`int`, *optional*, defaults to 1024):
-            Dimension of the multimodal projection space.
-
-    Example:
-
-    ```python
-    >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig
-
-    >>> # Initializing a clip-like vision config
-    >>> vision_config = CLIPVisionConfig()
-
-    >>> # Initializing a Bart config
-    >>> text_config = BartConfig()
-
-    >>> # Initializing a Florence-2 configuration
-    >>> configuration = Florence2Config(vision_config, text_config)
-
-    >>> # Initializing a model from the florence-2 configuration
-    >>> model = Florence2ForConditionalGeneration(configuration)
-
-    >>> # Accessing the model configuration
-    >>> configuration = model.config
-    ```"""
-
-    model_type = "florence2"
-    is_composition = False
-
-    def __init__(
-        self,
-        vision_config=None,
-        text_config=None,
-        ignore_index=-100,
-        vocab_size=51289,
-        projection_dim=1024,
-        **kwargs,
-    ):
-        self.ignore_index = ignore_index
-        self.vocab_size = vocab_size
-        self.projection_dim = projection_dim
-        if vision_config is not None:
-            vision_config = Florence2VisionConfig(**vision_config)
-        self.vision_config = vision_config
-        self.vocab_size = self.vocab_size
-
-        self.text_config = text_config
-        if text_config is not None:
-            self.text_config = Florence2LanguageConfig(**text_config)
-
-        super().__init__(**kwargs)
+# coding=utf-8
+# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Florence-2 configuration"""
+
+import warnings
+from typing import Optional
+
+from transformers import AutoConfig
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+class Florence2VisionConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel
+    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the Florence2VisionModel architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        drop_path_rate (`float`, *optional*, defaults to 0.1):
+            The dropout rate of the drop path layer.
+        patch_size (`List[int]`, *optional*, defaults to [7, 3, 3, 3]):
+            The patch size of the image.
+        patch_stride (`List[int]`, *optional*, defaults to [4, 2, 2, 2]):
+            The patch stride of the image.
+        patch_padding (`List[int]`, *optional*, defaults to [3, 1, 1, 1]):
+            The patch padding of the image.
+        patch_prenorm (`List[bool]`, *optional*, defaults to [False, True, True, True]):
+            Whether to apply layer normalization before the patch embedding layer.
+        enable_checkpoint (`bool`, *optional*, defaults to `False`):
+            Whether to enable gradient checkpointing.
+        dim_embed (`List[int]`, *optional*, defaults to [256, 512, 1024, 2048]):
+            The dimension of the embedding layer.
+        num_heads (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
+            The number of attention heads.
+        num_groups (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
+            The number of groups.
+        depths (`List[int]`, *optional*, defaults to [1, 1, 9, 1]):
+            The depth of the model.
+        window_size (`int`, *optional*, defaults to 12):
+            The window size of the model.
+        projection_dim (`int`, *optional*, defaults to 1024):
+            The dimension of the projection layer.
+        visual_temporal_embedding (`dict`, *optional*):
+            The configuration of the visual temporal embedding.
+        image_pos_embed (`dict`, *optional*):
+            The configuration of the image position embedding.
+        image_feature_source (`List[str]`, *optional*, defaults to ["spatial_avg_pool", "temporal_avg_pool"]):
+            The source of the image feature.
+
+    Example:
+
+    ```python
+    >>> from transformers import Florence2VisionConfig, Florence2VisionModel
+
+    >>> # Initializing a Florence2 Vision style configuration
+    >>> configuration = Florence2VisionConfig()
+
+    >>> # Initializing a model (with random weights)
+    >>> model = Florence2VisionModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "davit"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        drop_path_rate=0.1,
+        patch_size=None,
+        patch_stride=None,
+        patch_padding=None,
+        patch_prenorm=None,
+        enable_checkpoint=False,
+        dim_embed=None,
+        num_heads=None,
+        num_groups=None,
+        depths=None,
+        window_size=12,
+        projection_dim=1024,
+        visual_temporal_embedding=None,
+        image_pos_embed=None,
+        image_feature_source=None,
+        **kwargs,
+    ):
+        if patch_size is None:
+            patch_size = [7, 3, 3, 3]
+        if patch_stride is None:
+            patch_stride = [4, 2, 2, 2]
+        if patch_padding is None:
+            patch_padding = [3, 1, 1, 1]
+        if patch_prenorm is None:
+            patch_prenorm = [False, True, True, True]
+        if dim_embed is None:
+            dim_embed = [256, 512, 1024, 2048]
+        if num_heads is None:
+            num_heads = [8, 16, 32, 64]
+        if num_groups is None:
+            num_groups = [8, 16, 32, 64]
+        if depths is None:
+            depths = [1, 1, 9, 1]
+        if image_feature_source is None:
+            image_feature_source = ["spatial_avg_pool", "temporal_avg_pool"]
+
+        self.drop_path_rate = drop_path_rate
+        self.patch_size = patch_size
+        self.patch_stride = patch_stride
+        self.patch_padding = patch_padding
+        self.patch_prenorm = patch_prenorm
+        self.enable_checkpoint = enable_checkpoint
+        self.dim_embed = dim_embed
+        self.num_heads = num_heads
+        self.num_groups = num_groups
+        self.depths = depths
+        self.window_size = window_size
+        self.projection_dim = projection_dim
+        self.visual_temporal_embedding = visual_temporal_embedding
+        self.image_pos_embed = image_pos_embed
+        self.image_feature_source = image_feature_source
+
+        super().__init__(**kwargs)
+
+
+class Florence2LanguageConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`Florence2LanguagePreTrainedModel`]. It is used to instantiate a BART
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the BART
+    [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 51289):
+            Vocabulary size of the Florence2Language model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`Florence2LanguageModel`].
+        d_model (`int`, *optional*, defaults to 1024):
+            Dimensionality of the layers and the pooler layer.
+        encoder_layers (`int`, *optional*, defaults to 12):
+            Number of encoder layers.
+        decoder_layers (`int`, *optional*, defaults to 12):
+            Number of decoder layers.
+        encoder_attention_heads (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        decoder_attention_heads (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
+        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
+        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"silu"` and `"gelu_new"` are supported.
+        dropout (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        activation_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for activations inside the fully connected layer.
+        classifier_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the classifier.
+        max_position_embeddings (`int`, *optional*, defaults to 1024):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        init_std (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+            for more details.
+        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+            for more details.
+        scale_embedding (`bool`, *optional*, defaults to `False`):
+            Scale embeddings by dividing by sqrt(d_model).
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models).
+        num_labels (`int`, *optional*, defaults to 3):
+            The number of labels to use in [`Florence2LanguageForSequenceClassification`].
+        forced_eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+            `eos_token_id`.
+
+    Example:
+
+    ```python
+    >>> from transformers import Florence2LanguageConfig, Florence2LanguageModel
+
+    >>> # Initializing a Florence2 Language style configuration
+    >>> configuration = Florence2LanguageConfig()
+
+    >>> # Initializing a model (with random weights)
+    >>> model = Florence2LanguageModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "florence2_language"
+    keys_to_ignore_at_inference = ["past_key_values"]
+    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+    def __init__(
+        self,
+        vocab_size=51289,
+        max_position_embeddings=1024,
+        encoder_layers=12,
+        encoder_ffn_dim=4096,
+        encoder_attention_heads=16,
+        decoder_layers=12,
+        decoder_ffn_dim=4096,
+        decoder_attention_heads=16,
+        encoder_layerdrop=0.0,
+        decoder_layerdrop=0.0,
+        activation_function="gelu",
+        d_model=1024,
+        dropout=0.1,
+        attention_dropout=0.0,
+        activation_dropout=0.0,
+        init_std=0.02,
+        classifier_dropout=0.0,
+        scale_embedding=False,
+        use_cache=True,
+        num_labels=3,
+        pad_token_id=1,
+        bos_token_id=0,
+        eos_token_id=2,
+        is_encoder_decoder=True,
+        decoder_start_token_id=2,
+        forced_eos_token_id=2,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.d_model = d_model
+        self.encoder_ffn_dim = encoder_ffn_dim
+        self.encoder_layers = encoder_layers
+        self.encoder_attention_heads = encoder_attention_heads
+        self.decoder_ffn_dim = decoder_ffn_dim
+        self.decoder_layers = decoder_layers
+        self.decoder_attention_heads = decoder_attention_heads
+        self.dropout = dropout
+        self.attention_dropout = attention_dropout
+        self.activation_dropout = activation_dropout
+        self.activation_function = activation_function
+        self.init_std = init_std
+        self.encoder_layerdrop = encoder_layerdrop
+        self.decoder_layerdrop = decoder_layerdrop
+        self.classifier_dropout = classifier_dropout
+        self.use_cache = use_cache
+        self.num_hidden_layers = encoder_layers
+        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
+
+        super().__init__(
+            num_labels=num_labels,
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            is_encoder_decoder=is_encoder_decoder,
+            decoder_start_token_id=decoder_start_token_id,
+            forced_eos_token_id=forced_eos_token_id,
+            **kwargs,
+        )
+
+        self.forced_bos_token_id = kwargs.get("forced_bos_token_id", None)
+        # ensure backward compatibility for BART CNN models
+        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
+            self.forced_bos_token_id = self.bos_token_id
+            warnings.warn(
+                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
+                "The config can simply be saved and uploaded again to be fixed."
+            )
+
+
+class Florence2Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate a
+    Florence-2 model according to the specified arguments, defining the model architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vision_config (`Florence2VisionConfig`, *optional*):
+            Custom vision config or dict.
+        text_config (`Union[AutoConfig, dict]`, *optional*):
+            The config object of the text backbone.
+        ignore_index (`int`, *optional*, defaults to -100):
+            The ignore index for the loss function.
+        vocab_size (`int`, *optional*, defaults to 51289):
+            Vocabulary size of the Florence2 model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`~Florence2ForConditionalGeneration`].
+        projection_dim (`int`, *optional*, defaults to 1024):
+            Dimension of the multimodal projection space.
+
+    Example:
+
+    ```python
+    >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig
+
+    >>> # Initializing a clip-like vision config
+    >>> vision_config = CLIPVisionConfig()
+
+    >>> # Initializing a Bart config
+    >>> text_config = BartConfig()
+
+    >>> # Initializing a Florence-2 configuration
+    >>> configuration = Florence2Config(vision_config, text_config)
+
+    >>> # Initializing a model from the florence-2 configuration
+    >>> model = Florence2ForConditionalGeneration(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "florence2"
+    is_composition = False
+
+    def __init__(
+        self,
+        vision_config=None,
+        text_config=None,
+        ignore_index=-100,
+        vocab_size=51289,
+        projection_dim=1024,
+        **kwargs,
+    ):
+        self.ignore_index = ignore_index
+        self.vocab_size = vocab_size
+        self.projection_dim = projection_dim
+        if vision_config is not None:
+            vision_config = Florence2VisionConfig(**vision_config)
+        self.vision_config = vision_config
+        self.vocab_size = self.vocab_size
+
+        self.text_config = text_config
+        if text_config is not None:
+            self.text_config = Florence2LanguageConfig(**text_config)
+
+        super().__init__(**kwargs)
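
Reviewer note: the functional change in this file is that `Florence2VisionConfig.__init__` no longer uses mutable lists as default arguments; defaults are now `None` and filled in per call, and `Florence2LanguageConfig` additionally reads `forced_bos_token_id` out of `kwargs` explicitly. The `None` pattern matters because a Python default value is built once at function definition and shared across every call. A short check against the updated module (assuming `configuration_florence2.py` from this PR is importable from the working directory):

```python
# With the None-default rewrite, each config instance owns fresh lists.
# Under the old mutable defaults, both instances below would share one
# list object, so mutating a.dim_embed would also change b.dim_embed.
from configuration_florence2 import Florence2VisionConfig

a = Florence2VisionConfig()
b = Florence2VisionConfig()
a.dim_embed.append(4096)                      # mutate one instance only
assert b.dim_embed == [256, 512, 1024, 2048]  # the other stays intact
print("per-instance defaults confirmed")
```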
modeling_florence2.py CHANGED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json CHANGED
@@ -1,39 +1,24 @@
-{
-  "auto_map": {
-    "AutoProcessor": "processing_florence2.Florence2Processor"
-  },
-  "_valid_processor_keys": [
-    "images",
-    "do_resize",
-    "size",
-    "resample",
-    "do_rescale",
-    "rescale_factor",
-    "do_normalize",
-    "image_mean",
-    "image_std",
-    "return_tensors",
-    "data_format",
-    "input_data_format",
-    "do_convert_rgb"
-  ],
-  "do_convert_rgb": null,
-  "do_normalize": true,
-  "do_rescale": true,
-  "do_resize": true,
-  "do_center_crop": false,
-  "image_processor_type": "CLIPImageProcessor",
-  "image_seq_length": 577,
-  "image_mean": [0.485, 0.456, 0.406],
-  "image_std": [0.229, 0.224, 0.225],
-  "processor_class": "Florence2Processor",
-  "resample": 3,
-  "size": {
-    "height": 768,
-    "width": 768
-  },
-  "crop_size": {
-    "height": 768,
-    "width": 768
-  }
+{
+  "auto_map": {
+    "AutoProcessor": "processing_florence2.Florence2Processor"
+  },
+  "do_convert_rgb": null,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "do_center_crop": false,
+  "image_processor_type": "CLIPImageProcessor",
+  "image_seq_length": 576,
+  "image_mean": [0.485, 0.456, 0.406],
+  "image_std": [0.229, 0.224, 0.225],
+  "processor_class": "Florence2Processor",
+  "resample": 3,
+  "size": {
+    "height": 768,
+    "width": 768
+  },
+  "crop_size": {
+    "height": 768,
+    "width": 768
+  }
 }
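
Reviewer note: dropping `_valid_processor_keys` follows recent `transformers` image processors, and `image_seq_length` going from 577 to 576 matches the geometry implied by the other configs: the DaViT patch strides `[4, 2, 2, 2]` downsample by 32x, so a 768x768 input yields a 24x24 grid of visual tokens; the old 577 looks like a CLIP-style count that included an extra class token. The arithmetic, spelled out:

```python
# Derive image_seq_length from values already present in this repo's configs.
from math import prod

patch_stride = [4, 2, 2, 2]  # config.json -> vision_config.patch_stride
size = 768                   # preprocessor_config.json -> size.height/width

tokens_per_side = size // prod(patch_stride)  # 768 / 32 = 24
assert tokens_per_side**2 == 576              # the new image_seq_length
```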
processing_florence2.py CHANGED
The diff for this file is too large to render. See raw diff
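
Reviewer smoke test: since the modeling and processing diffs are too large to render here, the quickest review aid is an end-to-end run. A sketch, assuming a local checkout of this repo as the model path and the Florence-2 task-prompt convention (e.g. `<CAPTION>`) documented in the upstream model cards:

```python
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

path = "."  # local checkout with this PR applied
model = AutoModelForCausalLM.from_pretrained(path, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(path, trust_remote_code=True)

image = Image.new("RGB", (768, 768), "white")  # dummy input; any image works
inputs = processor(text="<CAPTION>", images=image, return_tensors="pt")

with torch.no_grad():
    out = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=32,
        num_beams=3,
    )
print(processor.batch_decode(out, skip_special_tokens=False)[0])
```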