# MiniCPM-o-4.5-nvidia-FlagOS / configuration_minicpmtts.py
# Uploaded by YummyYum via huggingface_hub (revision be99bcf, verified)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from transformers.configuration_utils import PretrainedConfig
class MiniCPMTTSConfig(PretrainedConfig):
    """Configuration for the MiniCPM TTS decoder.

    Groups the hyper-parameters of the TTS model: the dimensions of the
    conditioning LLM and the projector that maps its hidden states into
    the TTS space, the TTS transformer backbone itself (hidden size,
    layers, heads, vocab sizes), loss-weighting switches, and the
    streaming / sliding-window generation settings.  Every keyword
    parameter below is stored verbatim as an attribute of the same name;
    any extra keyword arguments are forwarded to
    :class:`~transformers.PretrainedConfig`.
    """

    model_type = "minicpmtts"

    def __init__(
        self,
        llm_dim: int = 2560,
        llm_intermediate_size: int = 768,
        llm_down_scale: bool = False,
        llm_dim_model_base: int = 256,
        projector_type: str = "mlp",
        hidden_act: str = "silu",
        aug_loss_weight: bool = False,
        aug_layer_loss_weight: bool = False,
        filter_tts_loss: bool = False,
        tts_filter_loss_fix: bool = False,
        long_weight: float = 0.1,
        short_weight: float = 0.1,
        hidden_size: int = 768,
        intermediate_size: int = 3072,
        num_attention_heads: int = 12,
        num_hidden_layers: int = 20,
        num_key_value_heads: int = 12,
        max_position_embeddings: int = 4096,
        num_audio_tokens: int = 4097,
        num_text_tokens: int = 21178,
        num_mel_bins: int = 100,
        num_vq: int = 1,
        use_llm_hidden_state: bool = False,
        audio_bos_token_id: int = 21132,
        text_eos_token_id: int = 21133,
        use_text: bool = True,
        streaming: bool = False,
        streaming_text_chunk_min: int = 3,
        streaming_text_chunk_max: int = 7,
        streaming_text_reserved_len: int = 300,
        streaming_audio_chunk_size: int = 50,
        attn_implementation: str = "sdpa",
        condition_type: str = "llm_hidden",
        backbone_model: str = "llama",
        audio_tokenizer_type: str = "wavtokenizer",
        audio_tokenizer_sample_rate: int = 24000,
        streaming_sliding_window: bool = False,
        streaming_sliding_window_max_text_len: int = 500,
        streaming_sliding_window_average_speed: int = 5,
        streaming_sliding_window_fast_speed: int = 7,
        streaming_sliding_window_slow_speed: int = 3,
        streaming_sliding_window_audio_frame_rate: int = 50,
        streaming_sliding_window_text_window_size: int = 50,
        streaming_sliding_window_audio_init_text_length: int = 10,
        streaming_sliding_window_audio_window_size: int = 300,
        normalize_projected_hidden: bool = False,
        interleaved: bool = False,
        attention_type: str = "sliding_recompute",
        recomputed_chunks: int = 1,
        window_size: int = 2,
        **kwargs,
    ):
        # Snapshot the named parameters before anything else runs.
        # `self` and `kwargs` are excluded, as is the implicit
        # `__class__` cell created by the zero-argument super() below.
        explicit = {
            name: value
            for name, value in locals().items()
            if name not in ("self", "kwargs", "__class__")
        }

        super().__init__(**kwargs)

        # Each named parameter becomes an attribute of the same name.
        for name, value in explicit.items():
            setattr(self, name, value)

        # Alias kept for code paths that read `llm_hidden_size`
        # instead of `llm_dim`.
        self.llm_hidden_size = llm_dim