byh711 committed on
Commit
75b3ae8
·
verified ·
1 Parent(s): c6ddb66

Upload Florence2ForConditionalGeneration

Browse files
Files changed (3) hide show
  1. config.json +19 -19
  2. generation_config.json +0 -9
  3. model.safetensors +2 -2
config.json CHANGED
@@ -3,8 +3,8 @@
3
  "Florence2ForConditionalGeneration"
4
  ],
5
  "auto_map": {
6
- "AutoConfig": "microsoft/Florence-2-base-ft--configuration_florence2.Florence2Config",
7
- "AutoModelForCausalLM": "microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration"
8
  },
9
  "bos_token_id": 0,
10
  "eos_token_id": 2,
@@ -12,7 +12,7 @@
12
  "is_encoder_decoder": true,
13
  "model_type": "florence2",
14
  "pad_token_id": 1,
15
- "projection_dim": 768,
16
  "text_config": {
17
  "_attn_implementation_autoset": true,
18
  "_name_or_path": "",
@@ -30,20 +30,20 @@
30
  "classif_dropout": 0.1,
31
  "classifier_dropout": 0.0,
32
  "cross_attention_hidden_size": null,
33
- "d_model": 768,
34
- "decoder_attention_heads": 12,
35
- "decoder_ffn_dim": 3072,
36
  "decoder_layerdrop": 0.0,
37
- "decoder_layers": 6,
38
  "decoder_start_token_id": 2,
39
  "diversity_penalty": 0.0,
40
  "do_sample": false,
41
  "dropout": 0.1,
42
  "early_stopping": true,
43
- "encoder_attention_heads": 12,
44
- "encoder_ffn_dim": 3072,
45
  "encoder_layerdrop": 0.0,
46
- "encoder_layers": 6,
47
  "encoder_no_repeat_ngram_size": 0,
48
  "eos_token_id": 2,
49
  "exponential_decay_length_penalty": null,
@@ -73,7 +73,7 @@
73
  "normalize_before": false,
74
  "num_beam_groups": 1,
75
  "num_beams": 3,
76
- "num_hidden_layers": 6,
77
  "num_return_sequences": 1,
78
  "output_attentions": false,
79
  "output_hidden_states": false,
@@ -124,10 +124,10 @@
124
  1
125
  ],
126
  "dim_embed": [
127
- 128,
128
  256,
129
  512,
130
- 1024
 
131
  ],
132
  "diversity_penalty": 0.0,
133
  "do_sample": false,
@@ -161,21 +161,21 @@
161
  "length_penalty": 1.0,
162
  "max_length": 20,
163
  "min_length": 0,
164
- "model_type": "davit",
165
  "no_repeat_ngram_size": 0,
166
  "num_beam_groups": 1,
167
  "num_beams": 1,
168
  "num_groups": [
169
- 4,
170
  8,
171
  16,
172
- 32
 
173
  ],
174
  "num_heads": [
175
- 4,
176
  8,
177
  16,
178
- 32
 
179
  ],
180
  "num_return_sequences": 1,
181
  "output_attentions": false,
@@ -208,7 +208,7 @@
208
  ],
209
  "prefix": null,
210
  "problem_type": null,
211
- "projection_dim": 768,
212
  "pruned_heads": {},
213
  "remove_invalid_values": false,
214
  "repetition_penalty": 1.0,
 
3
  "Florence2ForConditionalGeneration"
4
  ],
5
  "auto_map": {
6
+ "AutoConfig": "microsoft/Florence-2-large-ft--configuration_florence2.Florence2Config",
7
+ "AutoModelForCausalLM": "microsoft/Florence-2-large-ft--modeling_florence2.Florence2ForConditionalGeneration"
8
  },
9
  "bos_token_id": 0,
10
  "eos_token_id": 2,
 
12
  "is_encoder_decoder": true,
13
  "model_type": "florence2",
14
  "pad_token_id": 1,
15
+ "projection_dim": 1024,
16
  "text_config": {
17
  "_attn_implementation_autoset": true,
18
  "_name_or_path": "",
 
30
  "classif_dropout": 0.1,
31
  "classifier_dropout": 0.0,
32
  "cross_attention_hidden_size": null,
33
+ "d_model": 1024,
34
+ "decoder_attention_heads": 16,
35
+ "decoder_ffn_dim": 4096,
36
  "decoder_layerdrop": 0.0,
37
+ "decoder_layers": 12,
38
  "decoder_start_token_id": 2,
39
  "diversity_penalty": 0.0,
40
  "do_sample": false,
41
  "dropout": 0.1,
42
  "early_stopping": true,
43
+ "encoder_attention_heads": 16,
44
+ "encoder_ffn_dim": 4096,
45
  "encoder_layerdrop": 0.0,
46
+ "encoder_layers": 12,
47
  "encoder_no_repeat_ngram_size": 0,
48
  "eos_token_id": 2,
49
  "exponential_decay_length_penalty": null,
 
73
  "normalize_before": false,
74
  "num_beam_groups": 1,
75
  "num_beams": 3,
76
+ "num_hidden_layers": 12,
77
  "num_return_sequences": 1,
78
  "output_attentions": false,
79
  "output_hidden_states": false,
 
124
  1
125
  ],
126
  "dim_embed": [
 
127
  256,
128
  512,
129
+ 1024,
130
+ 2048
131
  ],
132
  "diversity_penalty": 0.0,
133
  "do_sample": false,
 
161
  "length_penalty": 1.0,
162
  "max_length": 20,
163
  "min_length": 0,
164
+ "model_type": "",
165
  "no_repeat_ngram_size": 0,
166
  "num_beam_groups": 1,
167
  "num_beams": 1,
168
  "num_groups": [
 
169
  8,
170
  16,
171
+ 32,
172
+ 64
173
  ],
174
  "num_heads": [
 
175
  8,
176
  16,
177
+ 32,
178
+ 64
179
  ],
180
  "num_return_sequences": 1,
181
  "output_attentions": false,
 
208
  ],
209
  "prefix": null,
210
  "problem_type": null,
211
+ "projection_dim": 1024,
212
  "pruned_heads": {},
213
  "remove_invalid_values": false,
214
  "repetition_penalty": 1.0,
generation_config.json CHANGED
@@ -1,13 +1,4 @@
1
  {
2
- "_from_model_config": true,
3
- "bos_token_id": 0,
4
- "decoder_start_token_id": 2,
5
- "early_stopping": true,
6
- "eos_token_id": 2,
7
- "forced_bos_token_id": 0,
8
- "forced_eos_token_id": 2,
9
- "no_repeat_ngram_size": 3,
10
  "num_beams": 3,
11
- "pad_token_id": 1,
12
  "transformers_version": "4.51.0.dev0"
13
  }
 
1
  {
 
 
 
 
 
 
 
 
2
  "num_beams": 3,
 
3
  "transformers_version": "4.51.0.dev0"
4
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:76f39e229690389a7e392e889e167c0f41d618515f6dd00bc0ce31eb72e93f39
3
- size 1083916964
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:973040590566ca14d94f4539de852127c55bfe2b90d175938760ec0cba34d6a3
3
+ size 3291921348