guanshuo committed on
Commit
b98979f
·
verified ·
1 Parent(s): e4e1fc6

Pushing model from H2O LLM Studio - Experiment 74273746-83a7-459b-83ed-c4d06df0d782

Browse files
Files changed (1) hide show
  1. config.json +82 -0
config.json ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "H2OVLChatModel"
4
+ ],
5
+ "auto_map": {
6
+ "AutoConfig": "configuration_h2ovl_chat.H2OVLChatConfig",
7
+ "AutoModel": "modeling_h2ovl_chat.H2OVLChatModel",
8
+ "AutoModelForCausalLM": "modeling_h2ovl_chat.H2OVLChatModel"
9
+ },
10
+ "downsample_ratio": 0.5,
11
+ "dtype": "bfloat16",
12
+ "dynamic_image_size": true,
13
+ "force_image_size": 448,
14
+ "llm_config": {
15
+ "_name_or_path": "h2oai/h2o-danube2-1.8b-chat",
16
+ "architectures": [
17
+ "MistralForCausalLM"
18
+ ],
19
+ "attention_dropout": 0.0,
20
+ "dtype": "bfloat16",
21
+ "head_dim": null,
22
+ "hidden_act": "silu",
23
+ "hidden_size": 2560,
24
+ "initializer_range": 0.02,
25
+ "intermediate_size": 6912,
26
+ "max_position_embeddings": 8192,
27
+ "model_type": "mistral",
28
+ "num_attention_heads": 32,
29
+ "num_hidden_layers": 24,
30
+ "num_key_value_heads": 8,
31
+ "pad_token_id": 0,
32
+ "rms_norm_eps": 1e-05,
33
+ "rope_theta": 10000,
34
+ "sliding_window": null,
35
+ "use_bfloat16": true,
36
+ "use_cache": true,
37
+ "vocab_size": 32010
38
+ },
39
+ "max_dynamic_patch": 6,
40
+ "min_dynamic_patch": 1,
41
+ "model_type": "h2ovl_chat",
42
+ "output_attentions": false,
43
+ "pad2square": false,
44
+ "ps_version": "v2",
45
+ "select_layer": -1,
46
+ "template": "h2ogpt2",
47
+ "transformers_version": null,
48
+ "use_backbone_lora": 0,
49
+ "use_llm_lora": 0,
50
+ "use_msac": true,
51
+ "use_thumbnail": true,
52
+ "vision_config": {
53
+ "_name_or_path": "OpenGVLab/InternViT-300M-448px",
54
+ "architectures": [
55
+ "InternVisionModel"
56
+ ],
57
+ "attention_dropout": 0.0,
58
+ "auto_map": {
59
+ "AutoConfig": "OpenGVLab/InternViT-300M-448px--configuration_intern_vit.InternVisionConfig",
60
+ "AutoModel": "OpenGVLab/InternViT-300M-448px--modeling_intern_vit.InternVisionModel"
61
+ },
62
+ "drop_path_rate": 0,
63
+ "dropout": 0.0,
64
+ "dtype": "bfloat16",
65
+ "hidden_act": "gelu",
66
+ "hidden_size": 1024,
67
+ "image_size": 448,
68
+ "initializer_factor": 1.0,
69
+ "initializer_range": 0.02,
70
+ "intermediate_size": 4096,
71
+ "layer_norm_eps": 1e-06,
72
+ "model_type": "intern_vit_6b",
73
+ "norm_type": "layer_norm",
74
+ "num_attention_heads": 16,
75
+ "num_channels": 3,
76
+ "num_hidden_layers": 24,
77
+ "patch_size": 14,
78
+ "qk_normalization": false,
79
+ "qkv_bias": true,
80
+ "use_flash_attn": true
81
+ }
82
+ }