Thatphum committed on
Commit
8101052
·
verified ·
1 Parent(s): 3174070

Update config.json

Browse files
Files changed (1) hide show
  1. config.json +86 -1
config.json CHANGED
@@ -75,5 +75,90 @@
75
  "use_cache": true,
76
  "use_im_start_end": true,
77
  "use_sliding_window": false,
78
- "vocab_size": 151860
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  }
 
75
  "use_cache": true,
76
  "use_im_start_end": true,
77
  "use_sliding_window": false,
78
+ "vocab_size": 151860,
79
+ "quantization_config": {
80
+ "config_groups": {
81
+ "group_0": {
82
+ "format": "pack-quantized",
83
+ "input_activations": null,
84
+ "output_activations": null,
85
+ "targets": [
86
+ "Linear"
87
+ ],
88
+ "weights": {
89
+ "actorder": null,
90
+ "block_structure": null,
91
+ "dynamic": false,
92
+ "group_size": 128,
93
+ "num_bits": 4,
94
+ "observer": "minmax",
95
+ "observer_kwargs": {},
96
+ "strategy": "group",
97
+ "symmetric": true,
98
+ "type": "int"
99
+ }
100
+ }
101
+ },
102
+ "format": "pack-quantized",
103
+ "global_compression_ratio": null,
104
+ "ignore": [
105
+ "model.vision_tower_high.blocks.0.attn.qkv",
106
+ "model.vision_tower_high.blocks.0.attn.proj",
107
+ "model.vision_tower_high.blocks.0.mlp.lin1",
108
+ "model.vision_tower_high.blocks.0.mlp.lin2",
109
+ "model.vision_tower_high.blocks.1.attn.qkv",
110
+ "model.vision_tower_high.blocks.1.attn.proj",
111
+ "model.vision_tower_high.blocks.1.mlp.lin1",
112
+ "model.vision_tower_high.blocks.1.mlp.lin2",
113
+ "model.vision_tower_high.blocks.2.attn.qkv",
114
+ "model.vision_tower_high.blocks.2.attn.proj",
115
+ "model.vision_tower_high.blocks.2.mlp.lin1",
116
+ "model.vision_tower_high.blocks.2.mlp.lin2",
117
+ "model.vision_tower_high.blocks.3.attn.qkv",
118
+ "model.vision_tower_high.blocks.3.attn.proj",
119
+ "model.vision_tower_high.blocks.3.mlp.lin1",
120
+ "model.vision_tower_high.blocks.3.mlp.lin2",
121
+ "model.vision_tower_high.blocks.4.attn.qkv",
122
+ "model.vision_tower_high.blocks.4.attn.proj",
123
+ "model.vision_tower_high.blocks.4.mlp.lin1",
124
+ "model.vision_tower_high.blocks.4.mlp.lin2",
125
+ "model.vision_tower_high.blocks.5.attn.qkv",
126
+ "model.vision_tower_high.blocks.5.attn.proj",
127
+ "model.vision_tower_high.blocks.5.mlp.lin1",
128
+ "model.vision_tower_high.blocks.5.mlp.lin2",
129
+ "model.vision_tower_high.blocks.6.attn.qkv",
130
+ "model.vision_tower_high.blocks.6.attn.proj",
131
+ "model.vision_tower_high.blocks.6.mlp.lin1",
132
+ "model.vision_tower_high.blocks.6.mlp.lin2",
133
+ "model.vision_tower_high.blocks.7.attn.qkv",
134
+ "model.vision_tower_high.blocks.7.attn.proj",
135
+ "model.vision_tower_high.blocks.7.mlp.lin1",
136
+ "model.vision_tower_high.blocks.7.mlp.lin2",
137
+ "model.vision_tower_high.blocks.8.attn.qkv",
138
+ "model.vision_tower_high.blocks.8.attn.proj",
139
+ "model.vision_tower_high.blocks.8.mlp.lin1",
140
+ "model.vision_tower_high.blocks.8.mlp.lin2",
141
+ "model.vision_tower_high.blocks.9.attn.qkv",
142
+ "model.vision_tower_high.blocks.9.attn.proj",
143
+ "model.vision_tower_high.blocks.9.mlp.lin1",
144
+ "model.vision_tower_high.blocks.9.mlp.lin2",
145
+ "model.vision_tower_high.blocks.10.attn.qkv",
146
+ "model.vision_tower_high.blocks.10.attn.proj",
147
+ "model.vision_tower_high.blocks.10.mlp.lin1",
148
+ "model.vision_tower_high.blocks.10.mlp.lin2",
149
+ "model.vision_tower_high.blocks.11.attn.qkv",
150
+ "model.vision_tower_high.blocks.11.attn.proj",
151
+ "model.vision_tower_high.blocks.11.mlp.lin1",
152
+ "model.vision_tower_high.blocks.11.mlp.lin2",
153
+ "model.mm_projector_vary",
154
+ "lm_head"
155
+ ],
156
+ "kv_cache_scheme": null,
157
+ "quant_method": "compressed-tensors",
158
+ "quantization_status": "compressed",
159
+ "sparsity_config": {},
160
+ "transform_config": {},
161
+ "version": "0.11.0"
162
+ }
163
+
164
  }