mujtaba025 commited on
Commit
bfab6aa
·
verified ·
1 Parent(s): 11e358d

Upload configuration_minicpm.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. configuration_minicpm.py +213 -0
configuration_minicpm.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2025 The OpenBMB Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from typing import Union
18
+
19
+ from transformers import PretrainedConfig
20
+ from transformers import Qwen2Config
21
+ from transformers import WhisperConfig
22
+ from transformers.utils import logging
23
+
24
+ try:
25
+ from modeling_navit_siglip import SiglipVisionConfig
26
+ except ImportError:
27
+ from .modeling_navit_siglip import SiglipVisionConfig
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+
32
class MiniCPMVSliceConfig(PretrainedConfig):
    """Configuration for MiniCPM-V image slicing.

    Holds the parameters that control how an input image is partitioned
    into slices/patches before being handed to the vision tower.
    """

    model_type = "minicpmv"

    def __init__(
        self,
        patch_size=14,
        max_slice_nums=9,
        scale_resolution=448,
        **kwargs,
    ):
        """
        Args:
            patch_size: side length, in pixels, of one vision patch.
            max_slice_nums: upper bound on the number of slices per image.
            scale_resolution: target resolution used when rescaling slices.
            **kwargs: forwarded to ``PretrainedConfig``.
        """
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.max_slice_nums = max_slice_nums
        self.scale_resolution = scale_resolution

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the slice sub-config, unwrapping it from a full ``minicpmv`` config when needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # A full "minicpmv" checkpoint nests these settings under "slice_config".
        if config_dict.get("model_type") == "minicpmv":
            config_dict = config_dict["slice_config"]

        loaded_type = config_dict.get("model_type")
        mismatched = (
            "model_type" in config_dict
            and hasattr(cls, "model_type")
            and loaded_type != cls.model_type
        )
        if mismatched:
            logger.warning(
                f"You are using a model of type {loaded_type} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
63
+
64
+
65
class ConditionalChatTTSConfig(PretrainedConfig):
    """Configuration for the conditional ChatTTS speech decoder used by MiniCPM-o.

    Stores the decoder transformer geometry, the audio/text token vocabulary
    layout, speaker-embedding wiring, streaming parameters, and default
    sampling settings.
    """

    model_type = "conditional_chattts"

    def __init__(
        self,
        llm_dim: int = 2560,
        hidden_size: int = 768,
        intermediate_size: int = 3072,
        num_attention_heads: int = 12,
        num_hidden_layers: int = 20,
        max_position_embeddings: int = 4096,
        num_audio_tokens: int = 626,
        num_text_tokens: int = 21178,
        num_mel_bins: int = 100,
        num_vq: int = 4,
        use_speaker_embedding: bool = True,
        use_llm_hidden_state: bool = False,
        spk_emb_token_id: int = 21143,
        num_spk_embs: int = 1,
        audio_bos_token_id: int = 21132,
        text_eos_token_id: int = 21133,
        use_text: bool = True,
        streaming: bool = True,
        streaming_text_chunk_size: int = 10,
        streaming_text_reserved_len: int = 300,
        streaming_audio_chunk_size: int = 50,
        attn_implementation: str = "sdpa",
        use_mlp: bool = True,
        aug_loss_weight: bool = True,
        do_sample: bool = True,
        top_p: float = 0.7,
        top_k: int = 20,
        repetition_penalty: float = 1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Bridge from the host LLM's hidden size into this decoder.
        self.llm_dim = llm_dim
        self.use_llm_hidden_state = use_llm_hidden_state

        # Decoder transformer geometry.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.max_position_embeddings = max_position_embeddings
        self.attn_implementation = attn_implementation
        self.use_mlp = use_mlp

        # Vocabulary / codebook layout and special token ids.
        self.num_audio_tokens = num_audio_tokens
        self.num_text_tokens = num_text_tokens
        self.num_mel_bins = num_mel_bins
        self.num_vq = num_vq
        self.audio_bos_token_id = audio_bos_token_id
        self.text_eos_token_id = text_eos_token_id
        self.use_text = use_text

        # Speaker conditioning.
        self.use_speaker_embedding = use_speaker_embedding
        self.spk_emb_token_id = spk_emb_token_id
        self.num_spk_embs = num_spk_embs

        # Streaming-generation chunking.
        self.streaming = streaming
        self.streaming_text_chunk_size = streaming_text_chunk_size
        self.streaming_text_reserved_len = streaming_text_reserved_len
        self.streaming_audio_chunk_size = streaming_audio_chunk_size

        # Training / sampling defaults.
        self.aug_loss_weight = aug_loss_weight
        self.do_sample = do_sample
        self.top_p = top_p
        self.top_k = top_k
        self.repetition_penalty = repetition_penalty
130
+
131
+
132
class MiniCPMOConfig(Qwen2Config):
    """Configuration for the MiniCPM-o multimodal model.

    Extends ``Qwen2Config`` (the LLM backbone) with a SigLIP vision
    sub-config, a Whisper audio sub-config, a conditional-ChatTTS TTS
    sub-config, and MiniCPM-V image-slicing options.
    """

    model_type = "minicpmo"
    keys_to_ignore_at_inference = ["past_key_values"]

    # Same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes added.
    default_vision_config = {
        "hidden_size": 1152,
        "image_size": 980,
        "intermediate_size": 4304,
        "model_type": "siglip",
        "num_attention_heads": 16,
        "num_hidden_layers": 27,
        "patch_size": 14,
    }

    def __init__(
        self,
        use_cache=True,
        query_num=64,
        image_size=448,
        drop_vision_last_layer=True,
        batch_vision_input=True,
        slice_config=None,
        vision_config=None,
        audio_config=None,
        tts_config=None,
        use_image_id=True,
        vision_batch_size=16,
        audio_pool_step=2,
        audio_chunk_length=1.0,
        stream_input=False,
        init_vision=True,
        init_audio=True,
        init_tts=True,
        **kwargs,
    ):
        """
        Args:
            use_cache: whether the LLM uses a KV cache at inference.
            query_num: number of resampler query tokens per image slice.
            image_size: nominal input image size (pixels).
            drop_vision_last_layer: drop the final vision-tower layer.
            batch_vision_input: batch image slices through the vision tower.
            slice_config: dict of ``MiniCPMVSliceConfig`` kwargs, or None for
                the default (``max_slice_nums=1``).
            vision_config: ``SiglipVisionConfig``, a dict of its kwargs, or
                None for ``default_vision_config``.
            audio_config: ``WhisperConfig``, a dict of its kwargs, or None
                for Whisper defaults (same as openai/whisper-medium, plus use_cache).
            tts_config: ``ConditionalChatTTSConfig``, a dict of its kwargs,
                or None for its defaults.
            use_image_id: emit image-id tokens in the prompt.
            vision_batch_size: max image slices per vision-tower forward.
            audio_pool_step: pooling stride over audio encoder features.
            audio_chunk_length: audio chunk length in seconds.
            stream_input: enable streaming (chunked) input mode.
            init_vision / init_audio / init_tts: whether to build each
                modality tower when the model is instantiated.
            **kwargs: forwarded to ``Qwen2Config``.

        Raises:
            TypeError: if ``vision_config``, ``audio_config`` or
                ``tts_config`` is of an unsupported type.
        """
        self.use_cache = use_cache
        self.query_num = query_num
        self.image_size = image_size
        self.drop_vision_last_layer = drop_vision_last_layer
        self.batch_vision_input = batch_vision_input
        self.use_image_id = use_image_id
        self.vision_batch_size = vision_batch_size
        self.audio_pool_step = audio_pool_step
        self.audio_chunk_length = audio_chunk_length
        self.stream_input = stream_input
        self.init_vision = init_vision
        self.init_audio = init_audio
        self.init_tts = init_tts

        if slice_config is None:
            self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
        else:
            self.slice_config = MiniCPMVSliceConfig(**slice_config)
        self.slice_mode = True

        # Fail fast on unsupported sub-config types instead of leaving the
        # attribute unset and raising an opaque AttributeError later.
        if vision_config is None:
            self.vision_config = SiglipVisionConfig(**self.default_vision_config)
            logger.info("vision_config is None, using default vision config")
        elif isinstance(vision_config, dict):
            self.vision_config = SiglipVisionConfig(**vision_config)
        elif isinstance(vision_config, SiglipVisionConfig):
            self.vision_config = vision_config
        else:
            raise TypeError(
                f"vision_config must be None, a dict or a SiglipVisionConfig, got {type(vision_config)}"
            )

        if audio_config is None:
            self.audio_config = WhisperConfig()
        elif isinstance(audio_config, dict):
            self.audio_config = WhisperConfig(**audio_config)
        elif isinstance(audio_config, WhisperConfig):
            self.audio_config = audio_config
        else:
            raise TypeError(
                f"audio_config must be None, a dict or a WhisperConfig, got {type(audio_config)}"
            )

        if tts_config is None:
            self.tts_config = ConditionalChatTTSConfig()
        elif isinstance(tts_config, dict):
            self.tts_config = ConditionalChatTTSConfig(**tts_config)
        elif isinstance(tts_config, ConditionalChatTTSConfig):
            self.tts_config = tts_config
        else:
            raise TypeError(
                f"tts_config must be None, a dict or a ConditionalChatTTSConfig, got {type(tts_config)}"
            )

        # Convenience mirror of the vision tower's patch size.
        self.patch_size = self.vision_config.patch_size

        super().__init__(**kwargs)