{
"architectures": [
"MiniCPMO"
],
"attention_dropout": 0.0,
"audio_chunk_length": 1.0,
"audio_config": {
"activation_dropout": 0.0,
"activation_function": "gelu",
"apply_spec_augment": false,
"architectures": [
"MiniCPMWhisperEncoder"
],
"attention_dropout": 0.0,
"begin_suppress_tokens": [
220,
50257
],
"bos_token_id": 50257,
"classifier_proj_size": 256,
"d_model": 1024,
"decoder_attention_heads": 16,
"decoder_ffn_dim": 4096,
"decoder_layerdrop": 0.0,
"decoder_layers": 24,
"decoder_start_token_id": 50258,
"dropout": 0.0,
"encoder_attention_heads": 16,
"encoder_ffn_dim": 4096,
"encoder_layerdrop": 0.0,
"encoder_layers": 24,
"eos_token_id": 50257,
"forced_decoder_ids": [
[
1,
50259
],
[
2,
50359
],
[
3,
50363
]
],
"init_std": 0.02,
"mask_feature_length": 10,
"mask_feature_min_masks": 0,
"mask_feature_prob": 0.0,
"mask_time_length": 10,
"mask_time_min_masks": 2,
"mask_time_prob": 0.05,
"max_length": 448,
"max_source_positions": 1500,
"max_target_positions": 448,
"median_filter_width": 7,
"model_type": "whisper",
"num_hidden_layers": 24,
"num_mel_bins": 80,
"pad_token_id": 50257,
"scale_embedding": false,
"suppress_tokens": [
1,
2,
7,
8,
9,
10,
14,
25,
26,
27,
28,
29,
31,
58,
59,
60,
61,
62,
63,
90,
91,
92,
93,
359,
503,
522,
542,
873,
893,
902,
918,
922,
931,
1350,
1853,
1982,
2460,
2627,
3246,
3253,
3268,
3536,
3846,
3961,
4183,
4667,
6585,
6647,
7273,
9061,
9383,
10428,
10929,
11938,
12033,
12331,
12562,
13793,
14157,
14635,
15265,
15618,
16553,
16604,
18362,
18956,
20075,
21675,
22520,
26130,
26161,
26435,
28279,
29464,
31650,
32302,
32470,
36865,
42863,
47425,
49870,
50254,
50258,
50358,
50359,
50360,
50361,
50362
],
"torch_dtype": "float32",
"use_cache": true,
"use_weighted_layer_sum": false,
"vocab_size": 51865
},
"audio_pool_step": 2,
"auto_map": {
"AutoConfig": "openbmb/MiniCPM-o-2_6--configuration_minicpm.MiniCPMOConfig",
"AutoModel": "openbmb/MiniCPM-o-2_6--modeling_minicpmo.MiniCPMO",
"AutoModelForCausalLM": "openbmb/MiniCPM-o-2_6--modeling_minicpmo.MiniCPMO"
},
"batch_vision_input": true,
"bos_token_id": 151643,
"chunk_input": true,
"drop_vision_last_layer": false,
"eos_token_id": 151645,
"hidden_act": "silu",
"hidden_size": 3584,
"image_size": 448,
"init_audio": false,
"init_tts": false,
"init_vision": true,
"initializer_range": 0.02,
"intermediate_size": 18944,
"listen_speak_type": "asr",
"max_position_embeddings": 32768,
"max_window_layers": 28,
"model_type": "minicpmo",
"num_attention_heads": 28,
"num_hidden_layers": 28,
"num_key_value_heads": 4,
"patch_size": 14,
"quantization_config": {
"config_groups": {
"group_0": {
"input_activations": {
"actorder": null,
"block_structure": null,
"dynamic": true,
"group_size": null,
"num_bits": 8,
"observer": null,
"observer_kwargs": {},
"strategy": "token",
"symmetric": true,
"type": "float"
},
"output_activations": null,
"targets": [
"Linear"
],
"weights": {
"actorder": null,
"block_structure": null,
"dynamic": false,
"group_size": null,
"num_bits": 8,
"observer": "minmax",
"observer_kwargs": {},
"strategy": "channel",
"symmetric": true,
"type": "float"
}
}
},
"format": "float-quantized",
"global_compression_ratio": null,
"ignore": [
"llm.lm_head",
"vpm.encoder.layers.0.self_attn.k_proj",
"vpm.encoder.layers.0.self_attn.v_proj",
"vpm.encoder.layers.0.self_attn.q_proj",
"vpm.encoder.layers.0.self_attn.out_proj",
"vpm.encoder.layers.0.mlp.fc1",
"vpm.encoder.layers.0.mlp.fc2",
"vpm.encoder.layers.1.self_attn.k_proj",
"vpm.encoder.layers.1.self_attn.v_proj",
"vpm.encoder.layers.1.self_attn.q_proj",
"vpm.encoder.layers.1.self_attn.out_proj",
"vpm.encoder.layers.1.mlp.fc1",
"vpm.encoder.layers.1.mlp.fc2",
"vpm.encoder.layers.2.self_attn.k_proj",
"vpm.encoder.layers.2.self_attn.v_proj",
"vpm.encoder.layers.2.self_attn.q_proj",
"vpm.encoder.layers.2.self_attn.out_proj",
"vpm.encoder.layers.2.mlp.fc1",
"vpm.encoder.layers.2.mlp.fc2",
"vpm.encoder.layers.3.self_attn.k_proj",
"vpm.encoder.layers.3.self_attn.v_proj",
"vpm.encoder.layers.3.self_attn.q_proj",
"vpm.encoder.layers.3.self_attn.out_proj",
"vpm.encoder.layers.3.mlp.fc1",
"vpm.encoder.layers.3.mlp.fc2",
"vpm.encoder.layers.4.self_attn.k_proj",
"vpm.encoder.layers.4.self_attn.v_proj",
"vpm.encoder.layers.4.self_attn.q_proj",
"vpm.encoder.layers.4.self_attn.out_proj",
"vpm.encoder.layers.4.mlp.fc1",
"vpm.encoder.layers.4.mlp.fc2",
"vpm.encoder.layers.5.self_attn.k_proj",
"vpm.encoder.layers.5.self_attn.v_proj",
"vpm.encoder.layers.5.self_attn.q_proj",
"vpm.encoder.layers.5.self_attn.out_proj",
"vpm.encoder.layers.5.mlp.fc1",
"vpm.encoder.layers.5.mlp.fc2",
"vpm.encoder.layers.6.self_attn.k_proj",
"vpm.encoder.layers.6.self_attn.v_proj",
"vpm.encoder.layers.6.self_attn.q_proj",
"vpm.encoder.layers.6.self_attn.out_proj",
"vpm.encoder.layers.6.mlp.fc1",
"vpm.encoder.layers.6.mlp.fc2",
"vpm.encoder.layers.7.self_attn.k_proj",
"vpm.encoder.layers.7.self_attn.v_proj",
"vpm.encoder.layers.7.self_attn.q_proj",
"vpm.encoder.layers.7.self_attn.out_proj",
"vpm.encoder.layers.7.mlp.fc1",
"vpm.encoder.layers.7.mlp.fc2",
"vpm.encoder.layers.8.self_attn.k_proj",
"vpm.encoder.layers.8.self_attn.v_proj",
"vpm.encoder.layers.8.self_attn.q_proj",
"vpm.encoder.layers.8.self_attn.out_proj",
"vpm.encoder.layers.8.mlp.fc1",
"vpm.encoder.layers.8.mlp.fc2",
"vpm.encoder.layers.9.self_attn.k_proj",
"vpm.encoder.layers.9.self_attn.v_proj",
"vpm.encoder.layers.9.self_attn.q_proj",
"vpm.encoder.layers.9.self_attn.out_proj",
"vpm.encoder.layers.9.mlp.fc1",
"vpm.encoder.layers.9.mlp.fc2",
"vpm.encoder.layers.10.self_attn.k_proj",
"vpm.encoder.layers.10.self_attn.v_proj",
"vpm.encoder.layers.10.self_attn.q_proj",
"vpm.encoder.layers.10.self_attn.out_proj",
"vpm.encoder.layers.10.mlp.fc1",
"vpm.encoder.layers.10.mlp.fc2",
"vpm.encoder.layers.11.self_attn.k_proj",
"vpm.encoder.layers.11.self_attn.v_proj",
"vpm.encoder.layers.11.self_attn.q_proj",
"vpm.encoder.layers.11.self_attn.out_proj",
"vpm.encoder.layers.11.mlp.fc1",
"vpm.encoder.layers.11.mlp.fc2",
"vpm.encoder.layers.12.self_attn.k_proj",
"vpm.encoder.layers.12.self_attn.v_proj",
"vpm.encoder.layers.12.self_attn.q_proj",
"vpm.encoder.layers.12.self_attn.out_proj",
"vpm.encoder.layers.12.mlp.fc1",
"vpm.encoder.layers.12.mlp.fc2",
"vpm.encoder.layers.13.self_attn.k_proj",
"vpm.encoder.layers.13.self_attn.v_proj",
"vpm.encoder.layers.13.self_attn.q_proj",
"vpm.encoder.layers.13.self_attn.out_proj",
"vpm.encoder.layers.13.mlp.fc1",
"vpm.encoder.layers.13.mlp.fc2",
"vpm.encoder.layers.14.self_attn.k_proj",
"vpm.encoder.layers.14.self_attn.v_proj",
"vpm.encoder.layers.14.self_attn.q_proj",
"vpm.encoder.layers.14.self_attn.out_proj",
"vpm.encoder.layers.14.mlp.fc1",
"vpm.encoder.layers.14.mlp.fc2",
"vpm.encoder.layers.15.self_attn.k_proj",
"vpm.encoder.layers.15.self_attn.v_proj",
"vpm.encoder.layers.15.self_attn.q_proj",
"vpm.encoder.layers.15.self_attn.out_proj",
"vpm.encoder.layers.15.mlp.fc1",
"vpm.encoder.layers.15.mlp.fc2",
"vpm.encoder.layers.16.self_attn.k_proj",
"vpm.encoder.layers.16.self_attn.v_proj",
"vpm.encoder.layers.16.self_attn.q_proj",
"vpm.encoder.layers.16.self_attn.out_proj",
"vpm.encoder.layers.16.mlp.fc1",
"vpm.encoder.layers.16.mlp.fc2",
"vpm.encoder.layers.17.self_attn.k_proj",
"vpm.encoder.layers.17.self_attn.v_proj",
"vpm.encoder.layers.17.self_attn.q_proj",
"vpm.encoder.layers.17.self_attn.out_proj",
"vpm.encoder.layers.17.mlp.fc1",
"vpm.encoder.layers.17.mlp.fc2",
"vpm.encoder.layers.18.self_attn.k_proj",
"vpm.encoder.layers.18.self_attn.v_proj",
"vpm.encoder.layers.18.self_attn.q_proj",
"vpm.encoder.layers.18.self_attn.out_proj",
"vpm.encoder.layers.18.mlp.fc1",
"vpm.encoder.layers.18.mlp.fc2",
"vpm.encoder.layers.19.self_attn.k_proj",
"vpm.encoder.layers.19.self_attn.v_proj",
"vpm.encoder.layers.19.self_attn.q_proj",
"vpm.encoder.layers.19.self_attn.out_proj",
"vpm.encoder.layers.19.mlp.fc1",
"vpm.encoder.layers.19.mlp.fc2",
"vpm.encoder.layers.20.self_attn.k_proj",
"vpm.encoder.layers.20.self_attn.v_proj",
"vpm.encoder.layers.20.self_attn.q_proj",
"vpm.encoder.layers.20.self_attn.out_proj",
"vpm.encoder.layers.20.mlp.fc1",
"vpm.encoder.layers.20.mlp.fc2",
"vpm.encoder.layers.21.self_attn.k_proj",
"vpm.encoder.layers.21.self_attn.v_proj",
"vpm.encoder.layers.21.self_attn.q_proj",
"vpm.encoder.layers.21.self_attn.out_proj",
"vpm.encoder.layers.21.mlp.fc1",
"vpm.encoder.layers.21.mlp.fc2",
"vpm.encoder.layers.22.self_attn.k_proj",
"vpm.encoder.layers.22.self_attn.v_proj",
"vpm.encoder.layers.22.self_attn.q_proj",
"vpm.encoder.layers.22.self_attn.out_proj",
"vpm.encoder.layers.22.mlp.fc1",
"vpm.encoder.layers.22.mlp.fc2",
"vpm.encoder.layers.23.self_attn.k_proj",
"vpm.encoder.layers.23.self_attn.v_proj",
"vpm.encoder.layers.23.self_attn.q_proj",
"vpm.encoder.layers.23.self_attn.out_proj",
"vpm.encoder.layers.23.mlp.fc1",
"vpm.encoder.layers.23.mlp.fc2",
"vpm.encoder.layers.24.self_attn.k_proj",
"vpm.encoder.layers.24.self_attn.v_proj",
"vpm.encoder.layers.24.self_attn.q_proj",
"vpm.encoder.layers.24.self_attn.out_proj",
"vpm.encoder.layers.24.mlp.fc1",
"vpm.encoder.layers.24.mlp.fc2",
"vpm.encoder.layers.25.self_attn.k_proj",
"vpm.encoder.layers.25.self_attn.v_proj",
"vpm.encoder.layers.25.self_attn.q_proj",
"vpm.encoder.layers.25.self_attn.out_proj",
"vpm.encoder.layers.25.mlp.fc1",
"vpm.encoder.layers.25.mlp.fc2",
"vpm.encoder.layers.26.self_attn.k_proj",
"vpm.encoder.layers.26.self_attn.v_proj",
"vpm.encoder.layers.26.self_attn.q_proj",
"vpm.encoder.layers.26.self_attn.out_proj",
"vpm.encoder.layers.26.mlp.fc1",
"vpm.encoder.layers.26.mlp.fc2",
"resampler.kv_proj",
"resampler.attn.out_proj"
],
"kv_cache_scheme": null,
"quant_method": "compressed-tensors",
"quantization_status": "compressed"
},
"query_num": 64,
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 1000000.0,
"slice_config": {
"max_slice_nums": 9,
"model_type": "minicpmv",
"patch_size": 14,
"scale_resolution": 448
},
"slice_mode": true,
"sliding_window": 131072,
"stream_input": false,
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.52.0.dev0",
"tts_config": {
"attn_implementation": "sdpa",
"audio_bos_token_id": 21132,
"aug_loss_weight": true,
"hidden_size": 768,
"intermediate_size": 3072,
"llm_dim": 3584,
"max_position_embeddings": 4096,
"model_type": "conditional_chattts",
"num_attention_heads": 12,
"num_audio_tokens": 626,
"num_hidden_layers": 20,
"num_mel_bins": 100,
"num_spk_embs": 1,
"num_text_tokens": 21178,
"num_vq": 4,
"spk_emb_token_id": 21143,
"streaming": true,
"streaming_audio_chunk_size": 50,
"streaming_text_chunk_size": 10,
"streaming_text_reserved_len": 300,
"text_eos_token_id": 21133,
"use_llm_hidden_state": false,
"use_mlp": true,
"use_speaker_embedding": true,
"use_text": true
},
"use_cache": true,
"use_image_id": true,
"use_sliding_window": false,
"version": 2.6,
"vision_batch_size": 16,
"vision_config": {
"_attn_implementation_autoset": true,
"attention_dropout": 0.0,
"hidden_act": "gelu_pytorch_tanh",
"hidden_size": 1152,
"image_size": 980,
"intermediate_size": 4304,
"layer_norm_eps": 1e-06,
"model_type": "siglip_vision_model",
"num_attention_heads": 16,
"num_channels": 3,
"num_hidden_layers": 27,
"patch_size": 14
},
"vocab_size": 151700
}