BoghdadyJR committed
Commit eca66ac · verified · Parent: 0c38e2e

Upload model

Files changed (2):
  1. config.json        +18 -71
  2. model.safetensors  +2 -2
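
Both files can be pulled at exactly this revision with huggingface_hub. A minimal sketch, using a placeholder repo id (the actual repository name is not part of this page):

from huggingface_hub import hf_hub_download

# Placeholder repo id; the real repository name is not shown on this page.
repo_id = "BoghdadyJR/<repo-name>"

# Fetch the two files touched by this commit, pinned to revision eca66ac
# (the short hash above; the full commit hash also works as a revision).
for filename in ["config.json", "model.safetensors"]:
    local_path = hf_hub_download(repo_id, filename, revision="eca66ac")
    print(local_path)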
config.json CHANGED
@@ -1,77 +1,24 @@
 {
   "architectures": [
-    "VisionEncoderDecoderModel"
+    "ViTModel"
   ],
-  "decoder": {
-    "_attn_implementation_autoset": true,
-    "_name_or_path": "gpt2",
-    "activation_function": "gelu_new",
-    "add_cross_attention": true,
-    "architectures": [
-      "GPT2LMHeadModel"
-    ],
-    "attn_pdrop": 0.1,
-    "embd_pdrop": 0.1,
-    "initializer_range": 0.02,
-    "is_decoder": true,
-    "layer_norm_epsilon": 1e-05,
-    "model_type": "gpt2",
-    "n_ctx": 1024,
-    "n_embd": 768,
-    "n_head": 12,
-    "n_inner": null,
-    "n_layer": 12,
-    "n_positions": 1024,
-    "reorder_and_upcast_attn": false,
-    "resid_pdrop": 0.1,
-    "scale_attn_by_inverse_layer_idx": false,
-    "scale_attn_weights": true,
-    "summary_activation": null,
-    "summary_first_dropout": 0.1,
-    "summary_proj_to_labels": true,
-    "summary_type": "cls_index",
-    "summary_use_proj": true,
-    "task_specific_params": {
-      "text-generation": {
-        "do_sample": true,
-        "max_length": 50
-      }
-    },
-    "torch_dtype": "float32",
-    "use_cache": true,
-    "vocab_size": 50257
-  },
-  "decoder_start_token_id": 50256,
-  "encoder": {
-    "_attn_implementation_autoset": true,
-    "_name_or_path": "google/vit-base-patch16-224-in21k",
-    "architectures": [
-      "ViTModel"
-    ],
-    "attention_probs_dropout_prob": 0.0,
-    "encoder_stride": 16,
-    "hidden_act": "gelu",
-    "hidden_dropout_prob": 0.0,
-    "hidden_size": 768,
-    "image_size": 224,
-    "initializer_range": 0.02,
-    "intermediate_size": 3072,
-    "layer_norm_eps": 1e-12,
-    "model_type": "vit",
-    "num_attention_heads": 12,
-    "num_channels": 3,
-    "num_hidden_layers": 12,
-    "patch_size": 16,
-    "pooler_act": "tanh",
-    "pooler_output_size": 768,
-    "qkv_bias": true,
-    "torch_dtype": "float32"
-  },
-  "eos_token_id": 50256,
-  "is_encoder_decoder": true,
-  "model_type": "vision-encoder-decoder",
-  "pad_token_id": 50256,
-  "tie_word_embeddings": false,
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "pooler_act": "tanh",
+  "pooler_output_size": 768,
+  "qkv_bias": true,
   "torch_dtype": "float32",
   "transformers_version": "4.50.2"
 }
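
After this change the config describes a standalone ViT-Base encoder; the earlier ViT + GPT-2 VisionEncoderDecoderModel wrapper is gone. A minimal sketch for loading the checkpoint and sanity-checking it against the new config.json, assuming a placeholder repo id (not taken from this page):

from transformers import ViTModel

# Placeholder repo id; substitute the actual repository.
repo_id = "BoghdadyJR/<repo-name>"

model = ViTModel.from_pretrained(repo_id, revision="eca66ac")
cfg = model.config

# These values mirror the new config.json above.
assert cfg.model_type == "vit"
assert cfg.hidden_size == 768
assert cfg.num_hidden_layers == 12 and cfg.num_attention_heads == 12
assert cfg.image_size == 224 and cfg.patch_size == 16

# A ViT-Base encoder has roughly 86M parameters, which lines up with the
# ~345 MB float32 safetensors file uploaded in this commit.
print(f"{model.num_parameters():,} parameters")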
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b2808a2e5c8601c6d4880151fb9d94f7b002a761c84fce70c77e917a0786bf97
-size 956835520
+oid sha256:200cde6f26db8d28dce71aa48ed860c13b4a4e9bd3cc1fb9c35b47de9855b74b
+size 345579424
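
This entry is a Git LFS pointer, not the weights themselves; the new file is about 345 MB versus roughly 957 MB before, consistent with dropping the GPT-2 decoder. A minimal sketch, assuming the weights have already been downloaded to a local model.safetensors, for checking the file against the updated pointer:

import hashlib
from pathlib import Path

# Placeholder path to the downloaded weights.
path = Path("model.safetensors")

# Values from the updated LFS pointer above.
expected_oid = "200cde6f26db8d28dce71aa48ed860c13b4a4e9bd3cc1fb9c35b47de9855b74b"
expected_size = 345579424

data = path.read_bytes()
assert len(data) == expected_size, f"size mismatch: {len(data)} bytes"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")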