{
"architectures": [
"VisionEncoderDecoderModel"
],
"decoder": {
"_name_or_path": "gpt2",
"activation_function": "gelu_new",
"add_cross_attention": true,
"architectures": [
"GPT2LMHeadModel"
],
"attn_pdrop": 0.1,
"dtype": "float32",
"embd_pdrop": 0.1,
"initializer_range": 0.02,
"is_decoder": true,
"layer_norm_epsilon": 1e-05,
"model_type": "gpt2",
"n_ctx": 1024,
"n_embd": 768,
"n_head": 12,
"n_inner": null,
"n_layer": 12,
"n_positions": 1024,
"reorder_and_upcast_attn": false,
"resid_pdrop": 0.1,
"scale_attn_by_inverse_layer_idx": false,
"scale_attn_weights": true,
"summary_activation": null,
"summary_first_dropout": 0.1,
"summary_proj_to_labels": true,
"summary_type": "cls_index",
"summary_use_proj": true,
"task_specific_params": {
"text-generation": {
"do_sample": true,
"max_length": 50
}
},
"use_cache": true,
"vocab_size": 50304
},
"decoder_start_token_id": 50287,
"dtype": "float32",
"encoder": {
"_name_or_path": "microsoft/dit-base",
"add_fpn": false,
"architectures": [
"BeitForMaskedImageModeling"
],
"attention_probs_dropout_prob": 0.0,
"auxiliary_channels": 256,
"auxiliary_concat_input": false,
"auxiliary_loss_weight": 0.4,
"auxiliary_num_convs": 1,
"drop_path_rate": 0.1,
"dtype": "float32",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.0,
"hidden_size": 768,
"image_size": 2048,
"initializer_range": 0.02,
"intermediate_size": 3072,
"interpolate_pos_encoding": true,
"layer_norm_eps": 1e-12,
"layer_scale_init_value": 0.1,
"model_type": "beit",
"num_attention_heads": 12,
"num_channels": 3,
"num_hidden_layers": 12,
"out_features": [
"stage3",
"stage5",
"stage7",
"stage11"
],
"out_indices": [
3,
5,
7,
11
],
"patch_size": 16,
"pool_scales": [
1,
2,
3,
6
],
"reshape_hidden_states": true,
"semantic_loss_ignore_index": 255,
"stage_names": [
"stem",
"stage1",
"stage2",
"stage3",
"stage4",
"stage5",
"stage6",
"stage7",
"stage8",
"stage9",
"stage10",
"stage11",
"stage12"
],
"use_absolute_position_embeddings": true,
"use_auxiliary_head": true,
"use_mask_token": true,
"use_mean_pooling": true,
"use_relative_position_bias": false,
"use_shared_relative_position_bias": false,
"vocab_size": 8192
},
"eos_token_id": 50256,
"is_encoder_decoder": true,
"max_length": null,
"model_type": "vision-encoder-decoder",
"no_repeat_ngram_size": null,
"pad_token_id": 50257,
"tie_word_embeddings": false,
"transformers_version": "4.57.6"
}