Upload folder using huggingface_hub
- .gitattributes +1 -0
- asr_config.py +184 -0
- asr_modeling.py +808 -0
- asr_pipeline.py +519 -0
- asr_processing.py +121 -0
- chat_template.jinja +89 -0
- config.json +197 -0
- generation_config.json +15 -0
- model.safetensors +3 -0
- preprocessor_config.json +18 -0
- projectors.py +450 -0
- tokenizer.json +3 -0
- tokenizer_config.json +17 -0
- training_args.bin +3 -0
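
This commit was produced with huggingface_hub's upload_folder, and the same library can pull the repository back down. A minimal sketch; the repo id "user/asr-model" and local paths are placeholders, not values taken from this commit:

from huggingface_hub import snapshot_download, upload_folder

# Fetch the whole repository (custom code, config, weights) into a local directory
local_dir = snapshot_download(repo_id="user/asr-model")

# Re-create a commit like this one: push a local folder in a single commit
upload_folder(
    repo_id="user/asr-model",
    folder_path="./asr-model",
    commit_message="Upload folder using huggingface_hub",
)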
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
asr_config.py
ADDED
@@ -0,0 +1,184 @@
from typing import Optional

import transformers


class ASRConfig(transformers.PretrainedConfig):
    model_type = "asr_model"
    is_composition = True

    def __init__(
        self,
        audio_model_id: str = "openai/whisper-large-v3-turbo",
        text_model_id: str = "HuggingFaceTB/SmolLM3-3B",
        attn_implementation: str = "flash_attention_2",
        model_dtype: str = "bfloat16",
        num_beams: Optional[int] = None,
        system_prompt: str = "You are a helpful assistant.",
        user_prompt: str = "Please transcribe this English audio into text: <audio>",
        encoder_dim: Optional[int] = None,
        llm_dim: Optional[int] = None,
        # Encoder conv layers: list of (padding, kernel_size, stride) tuples
        # Default is Whisper/GLM-ASR structure: conv1(k=3,s=1,p=1) + conv2(k=3,s=2,p=1)
        encoder_conv_layers: Optional[list] = None,
        audio_sample_rate: int = 16000,
        projector_pool_stride: int = 4,
        downsample_rate: int = 5,  # Granite default
        projector_hidden_dim: Optional[int] = None,
        projector_type: str = "mlp",  # "mlp", "mosa", "moe", "qformer"
        projector_num_layers: int = 2,  # Number of layers in MLP projector
        projector_init_std: float = 0.02,  # Weight initialization std
        projector_dropout: float = 0.0,  # Dropout rate for projector layers
        # MoE-specific configuration
        num_experts: int = 4,  # Number of experts in MoE projectors
        num_experts_per_tok: int = 2,  # Top-k experts per token
        router_aux_loss_coef: float = 0.01,  # Auxiliary loss coefficient for load balancing
        # QFormer-specific configuration (Granite defaults)
        qformer_window_size: int = 15,  # Window size for QFormer processing
        qformer_hidden_size: Optional[int] = None,  # QFormer hidden size (defaults to encoder_dim)
        qformer_num_layers: int = 2,  # Number of QFormer transformer layers
        qformer_num_heads: int = 16,  # Number of attention heads in QFormer
        qformer_intermediate_size: Optional[int] = None,  # FFN size (defaults to 4x hidden)
        label_smoothing: float = 0.0,  # Label smoothing for cross-entropy loss
        inference_warmup_tokens: int = 10,
        # SpecAugment settings (Whisper defaults)
        use_specaugment: bool = False,
        mask_time_prob: float = 0.05,  # Probability of masking time steps
        mask_time_length: int = 10,  # Max length of time mask
        mask_time_min_masks: int = 2,  # Min number of time masks
        mask_feature_prob: float = 0.0,  # Probability of masking frequency bins (disabled by default)
        mask_feature_length: int = 10,  # Max length of frequency mask
        mask_feature_min_masks: int = 0,  # Min number of frequency masks
        max_new_tokens: Optional[int] = None,
        min_new_tokens: Optional[int] = None,
        repetition_penalty: Optional[float] = None,
        length_penalty: Optional[float] = None,
        no_repeat_ngram_size: Optional[int] = None,
        use_cache: Optional[bool] = None,
        **kwargs,
    ):
        # Set default generation parameters (greedy decoding only)
        generation_defaults = {
            "num_beams": 1,
            "max_new_tokens": 256,
            "min_new_tokens": 0,
            "repetition_penalty": 1.0,
            "length_penalty": 1.0,
            "no_repeat_ngram_size": 0,
            "use_cache": True,
        }

        # Apply defaults (config.json values take precedence)
        kwargs = {**generation_defaults, **kwargs}

        self.audio_model_id = audio_model_id
        self.text_model_id = text_model_id
        self.attn_implementation = attn_implementation
        self.model_dtype = model_dtype
        self.system_prompt = system_prompt
        self.user_prompt = user_prompt
        self.encoder_dim = encoder_dim
        self.llm_dim = llm_dim
        # Default conv layers for Whisper/GLM-ASR: [(pad, kernel, stride), ...]
        self.encoder_conv_layers = encoder_conv_layers or [(1, 3, 1), (1, 3, 2)]
        self.audio_sample_rate = audio_sample_rate
        self.projector_init_std = projector_init_std
        self.projector_pool_stride = projector_pool_stride
        self.downsample_rate = downsample_rate
        self.projector_hidden_dim = projector_hidden_dim
        self.projector_type = projector_type
        self.projector_num_layers = projector_num_layers
        self.projector_dropout = projector_dropout
        # MoE-specific configuration
        self.num_experts = num_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.router_aux_loss_coef = router_aux_loss_coef
        # QFormer-specific configuration
        self.qformer_window_size = qformer_window_size
        self.qformer_hidden_size = qformer_hidden_size
        self.qformer_num_layers = qformer_num_layers
        self.qformer_num_heads = qformer_num_heads
        self.qformer_intermediate_size = qformer_intermediate_size
        self.label_smoothing = label_smoothing
        self.inference_warmup_tokens = inference_warmup_tokens
        # SpecAugment configuration
        self.use_specaugment = use_specaugment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # Generation parameters (use explicit value if provided, else use default)
        self.num_beams = num_beams if num_beams is not None else generation_defaults["num_beams"]
        self.max_new_tokens = (
            max_new_tokens if max_new_tokens is not None else generation_defaults["max_new_tokens"]
        )
        self.min_new_tokens = (
            min_new_tokens if min_new_tokens is not None else generation_defaults["min_new_tokens"]
        )
        self.repetition_penalty = (
            repetition_penalty
            if repetition_penalty is not None
            else generation_defaults["repetition_penalty"]
        )
        self.length_penalty = (
            length_penalty if length_penalty is not None else generation_defaults["length_penalty"]
        )
        self.no_repeat_ngram_size = (
            no_repeat_ngram_size
            if no_repeat_ngram_size is not None
            else generation_defaults["no_repeat_ngram_size"]
        )
        self.use_cache = use_cache if use_cache is not None else generation_defaults["use_cache"]

        if "audio_config" not in kwargs:
            self.audio_config = transformers.AutoConfig.from_pretrained(audio_model_id)
            # Override dtype to match model_dtype
            self.audio_config.dtype = model_dtype
        else:
            self.audio_config = kwargs.pop("audio_config")

        if "text_config" not in kwargs:
            self.text_config = transformers.AutoConfig.from_pretrained(
                text_model_id, trust_remote_code=True
            )
            # Override dtype to match model_dtype
            self.text_config.dtype = model_dtype
        else:
            self.text_config = kwargs.pop("text_config")

        if isinstance(self.text_config, dict):
            # Reconstruct config from dict using the model_type stored in the dict
            model_type = self.text_config["model_type"]
            config_class = transformers.AutoConfig.for_model(model_type).__class__
            self.text_config = config_class(**self.text_config)

        if isinstance(self.audio_config, dict):
            model_type = self.audio_config.get("model_type")
            if model_type:
                config_class = transformers.AutoConfig.for_model(model_type).__class__
                self.audio_config = config_class(**self.audio_config)

        super().__init__(**kwargs)

        self.auto_map = {
            "AutoConfig": "asr_config.ASRConfig",
            "AutoModel": "asr_modeling.ASRModel",
            "AutoModelForSpeechSeq2Seq": "asr_modeling.ASRModel",
            "AutoProcessor": "asr_processing.ASRProcessor",
        }
        self.custom_pipelines = {
            "automatic-speech-recognition": {
                "impl": "asr_pipeline.ASRPipeline",
                "pt": ["AutoModelForSpeechSeq2Seq"],
                "tf": [],
                "type": "audio",
            }
        }
        self.architectures = ["ASRModel"]
        self.pipeline_tag = "automatic-speech-recognition"


transformers.AutoConfig.register("asr_model", ASRConfig)
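
The auto_map and custom_pipelines entries registered above are what let transformers resolve these custom classes when the repository is loaded with trust_remote_code=True. A minimal usage sketch, assuming asr_config.py and the files it references are importable locally; note that instantiating the config fetches the Whisper and SmolLM3 sub-configs from the Hub:

from asr_config import ASRConfig

# Defaults: Whisper large-v3-turbo encoder + SmolLM3-3B decoder, MLP projector
config = ASRConfig(projector_type="mlp", use_specaugment=True)
print(config.audio_model_id, config.text_model_id)
print(config.max_new_tokens)  # 256, filled in from generation_defaults

# Registered under model_type "asr_model", so AutoConfig.from_pretrained(repo_id,
# trust_remote_code=True) on this repository resolves to ASRConfig as well.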
asr_modeling.py
ADDED
@@ -0,0 +1,808 @@
import json
from pathlib import Path
from threading import Thread
from typing import Iterator, Optional, Union

import torch
import torch.nn as nn
from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoTokenizer,
    PreTrainedModel,
    TextIteratorStreamer,
)
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast

try:
    from .asr_config import ASRConfig
    from .projectors import PROJECTOR_CLASSES
except ImportError:
    from asr_config import ASRConfig  # type: ignore[no-redef]
    from projectors import PROJECTOR_CLASSES  # type: ignore[no-redef]


def _compute_mask_indices(
    shape: tuple[int, int],
    mask_prob: float,
    mask_length: int,
    min_masks: int = 0,
    device: torch.device = None,
) -> torch.Tensor:
    """Compute random mask spans for SpecAugment.

    Based on transformers' _compute_mask_indices for Wav2Vec2/Whisper.

    Args:
        shape: (batch_size, sequence_length)
        mask_prob: Probability for each token to be chosen as start of mask span
        mask_length: Maximum length of mask span
        min_masks: Minimum number of masks per sample
        device: Device to create tensor on

    Returns:
        Boolean mask tensor of shape (batch_size, sequence_length)
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError(f"mask_length must be >= 1, got {mask_length}")

    if mask_length > sequence_length:
        raise ValueError(f"mask_length {mask_length} must be <= sequence_length {sequence_length}")

    # Compute number of masked spans per sample
    num_masked_spans = int(mask_prob * sequence_length / mask_length + torch.rand(1).item())
    num_masked_spans = max(num_masked_spans, min_masks)

    # Clamp to ensure we don't exceed sequence length
    if num_masked_spans * mask_length > sequence_length:
        num_masked_spans = sequence_length // mask_length

    if num_masked_spans == 0:
        return torch.zeros((batch_size, sequence_length), dtype=torch.bool, device=device)

    # Uniformly sample span start indices
    mask = torch.zeros((batch_size, sequence_length), dtype=torch.bool, device=device)

    for i in range(batch_size):
        # Random start indices for this sample
        spec_aug_start_indices = torch.randint(
            0, sequence_length - mask_length + 1, (num_masked_spans,), device=device
        )

        # Create mask spans
        for start_idx in spec_aug_start_indices:
            mask[i, start_idx : start_idx + mask_length] = True

    return mask


def apply_specaugment(
    input_features: torch.Tensor,
    mask_time_prob: float = 0.05,
    mask_time_length: int = 10,
    mask_time_min_masks: int = 2,
    mask_feature_prob: float = 0.0,
    mask_feature_length: int = 10,
    mask_feature_min_masks: int = 0,
) -> torch.Tensor:
    """Apply SpecAugment to mel spectrogram features.

    Args:
        input_features: Mel spectrogram of shape (batch, n_mels, time)
        mask_time_prob: Probability of masking time steps
        mask_time_length: Max length of time mask
        mask_time_min_masks: Min number of time masks
        mask_feature_prob: Probability of masking frequency bins
        mask_feature_length: Max length of frequency mask
        mask_feature_min_masks: Min number of frequency masks

    Returns:
        Augmented mel spectrogram with same shape
    """
    batch_size, n_mels, time_steps = input_features.shape
    device = input_features.device

    # Clone to avoid modifying original
    augmented = input_features.clone()

    # Time masking (along time dimension)
    # Apply if prob > 0 OR min_masks > 0 (to support fixed mask count with prob=0)
    if mask_time_prob > 0 or mask_time_min_masks > 0:
        time_mask = _compute_mask_indices(
            shape=(batch_size, time_steps),
            mask_prob=mask_time_prob,
            mask_length=mask_time_length,
            min_masks=mask_time_min_masks,
            device=device,
        )
        # Expand to (batch, 1, time) for broadcasting
        time_mask = time_mask.unsqueeze(1)
        augmented = augmented.masked_fill(time_mask, 0.0)

    # Frequency masking (along mel dimension)
    # Apply if prob > 0 OR min_masks > 0 (to support fixed mask count with prob=0)
    if mask_feature_prob > 0 or mask_feature_min_masks > 0:
        feature_mask = _compute_mask_indices(
            shape=(batch_size, n_mels),
            mask_prob=mask_feature_prob,
            mask_length=mask_feature_length,
            min_masks=mask_feature_min_masks,
            device=device,
        )
        # Expand to (batch, n_mels, 1) for broadcasting
        feature_mask = feature_mask.unsqueeze(2)
        augmented = augmented.masked_fill(feature_mask, 0.0)

    return augmented


class ASRModel(PreTrainedModel, GenerationMixin):
    """Audio-to-text model combining an audio encoder, projector, and language model."""

    config_class = ASRConfig
    base_model_prefix = "model"
    main_input_name = "input_features"
    _supports_flash_attn_2 = True
    supports_gradient_checkpointing = True
    _is_loading_from_pretrained: bool = False
    _pretrained_model_path: Optional[str] = None

    TRANSCRIBE_PROMPT = "Transcribe: "

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
        """Load model from pretrained, handling device placement correctly."""
        from safetensors.torch import load_file
        from transformers.utils.hub import cached_file

        config = kwargs.pop("config", None)
        if config is None:
            config = ASRConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        # Set flag to avoid device_map="auto" in sub-model loaders
        cls._is_loading_from_pretrained = True
        cls._pretrained_model_path = pretrained_model_name_or_path

        try:
            model = cls(config, **kwargs)

            # Load projector weights from safetensors
            subfolder = kwargs.get("subfolder")
            revision = kwargs.get("revision")
            cache_kwargs = {}
            if subfolder:
                cache_kwargs["subfolder"] = subfolder
            if revision:
                cache_kwargs["revision"] = revision

            model_file = cached_file(
                pretrained_model_name_or_path,
                "model.safetensors",
                _raise_exceptions_for_missing_entries=False,
                **cache_kwargs,
            )

            if model_file is not None:
                state_dict = load_file(model_file)
                model.load_state_dict(state_dict, strict=False)

            return model
        finally:
            cls._is_loading_from_pretrained = False
            cls._pretrained_model_path = None

    def __init__(self, config: ASRConfig, **kwargs):
        super().__init__(config)

        self.system_prompt = config.system_prompt
        target_dtype = getattr(torch, config.model_dtype)

        # Audio encoder (frozen)
        self.audio_tower = self._load_audio_encoder(config, target_dtype)

        # Language model (frozen)
        self.language_model = self._load_language_model(config, target_dtype)

        # Initialize tokenizer and special tokens
        self._init_tokenizer(config)

        # Set up generation config with greedy decoding defaults
        self.generation_config = self.language_model.generation_config
        self.generation_config.max_new_tokens = config.max_new_tokens
        self.generation_config.min_new_tokens = config.min_new_tokens
        self.generation_config.num_beams = config.num_beams
        self.generation_config.do_sample = False
        # Clear sampling params (inherited from LLM) since we use greedy decoding
        self.generation_config.temperature = None
        self.generation_config.top_p = None
        self.generation_config.top_k = None
        self.generation_config.use_cache = config.use_cache
        self.generation_config.length_penalty = config.length_penalty
        self.generation_config.repetition_penalty = config.repetition_penalty
        self.generation_config.no_repeat_ngram_size = config.no_repeat_ngram_size
        self.generation_config.eos_token_id = [
            self.tokenizer.convert_tokens_to_ids("<|im_end|>"),
            self.tokenizer.convert_tokens_to_ids("<|endoftext|>"),
        ]
        self.generation_config.pad_token_id = self.tokenizer.pad_token_id

        # Feature extractor for audio preprocessing
        self.feature_extractor = self._create_feature_extractor(config)

        # Audio projector (trainable)
        self.projector = self._create_projector(config, target_dtype)

        # For model parallelism
        self._no_split_modules = getattr(self.language_model, "_no_split_modules", [])

    def _create_feature_extractor(self, config: ASRConfig):
        """Create the appropriate feature extractor for the audio encoder."""
        from transformers import AutoFeatureExtractor

        return AutoFeatureExtractor.from_pretrained(config.audio_model_id)

    @classmethod
    def _load_audio_encoder(cls, config: ASRConfig, dtype: torch.dtype) -> nn.Module:
        """Load and freeze the audio encoder."""
        encoder_kwargs = {
            "attn_implementation": config.attn_implementation,
            "low_cpu_mem_usage": True,
            "dtype": dtype,
        }

        if "whisper" in config.audio_model_id.lower():
            from transformers import WhisperModel

            full_model = WhisperModel.from_pretrained(config.audio_model_id, **encoder_kwargs)
            encoder = full_model.encoder
            del full_model
        elif "glm" in config.audio_model_id.lower():
            # GLM-ASR models use audio_tower as the encoder
            # Requires transformers >= 5.x or installed from source
            from transformers import AutoModelForSeq2SeqLM

            full_model = AutoModelForSeq2SeqLM.from_pretrained(
                config.audio_model_id, trust_remote_code=True, **encoder_kwargs
            )
            # GLM stores encoder at audio_tower (GlmAsrEncoder)
            encoder = full_model.audio_tower
            # Clear references to free VRAM from the LLM decoder
            full_model.language_model = None
            full_model.multi_modal_projector = None
            del full_model
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        else:
            encoder = AutoModel.from_pretrained(config.audio_model_id, **encoder_kwargs)

        encoder.requires_grad_(False)
        encoder.eval()
        return encoder

    @classmethod
    def _load_language_model(cls, config: ASRConfig, dtype: torch.dtype) -> PreTrainedModel:
        """Load and freeze the language model."""
        decoder_kwargs = {
            "attn_implementation": config.attn_implementation,
            "trust_remote_code": True,
            "tie_word_embeddings": False,
            "low_cpu_mem_usage": True,
            "dtype": dtype,
        }

        decoder = AutoModelForCausalLM.from_pretrained(config.text_model_id, **decoder_kwargs)
        decoder.config.use_cache = getattr(config, "use_cache", True)
        decoder.requires_grad_(False)
        decoder.eval()
        return decoder

    def _create_projector(self, config: ASRConfig, dtype: torch.dtype) -> nn.Module:
        """Create the trainable audio projector."""
        # Auto-detect dimensions if not specified
        if config.encoder_dim is None:
            enc_cfg = self.audio_tower.config
            config.encoder_dim = getattr(enc_cfg, "hidden_size", None) or getattr(
                enc_cfg, "d_model", None
            )
            if config.encoder_dim is None:
                raise ValueError("Could not auto-detect encoder_dim. Please specify in config.")

        if config.llm_dim is None:
            dec_cfg = self.language_model.config
            config.llm_dim = getattr(dec_cfg, "hidden_size", None) or getattr(
                dec_cfg, "d_model", None
            )
            if config.llm_dim is None:
                raise ValueError("Could not auto-detect llm_dim. Please specify in config.")

        # Select projector type based on config
        projector_type = getattr(config, "projector_type", "mlp")
        projector_class = PROJECTOR_CLASSES.get(projector_type)
        if projector_class is None:
            raise ValueError(
                f"Unknown projector_type: {projector_type}. "
                f"Valid options: {list(PROJECTOR_CLASSES.keys())}"
            )
        projector = projector_class(config)

        # Move projector to same device as language model (important when using quantization)
        device = next(self.language_model.parameters()).device
        return projector.to(device=device, dtype=dtype)

    def _init_tokenizer(self, config: ASRConfig):
        """Initialize tokenizer with audio token."""
        self.tokenizer = AutoTokenizer.from_pretrained(config.text_model_id, trust_remote_code=True)

        # Set pad token
        if (
            self.tokenizer.pad_token is None
            or self.tokenizer.pad_token_id == self.tokenizer.eos_token_id
        ) and "<|finetune_right_pad_id|>" in self.tokenizer.get_vocab():
            self.tokenizer.pad_token = "<|finetune_right_pad_id|>"

        # Add audio token
        existing_special = getattr(self.tokenizer, "additional_special_tokens", None) or []
        if "<audio>" not in existing_special:
            self.tokenizer.add_special_tokens(
                {"additional_special_tokens": existing_special + ["<audio>"]}
            )
            self.language_model.resize_token_embeddings(len(self.tokenizer), mean_resizing=False)

        self.audio_token_id = self.tokenizer.convert_tokens_to_ids("<audio>")
        self.tokenizer.padding_side = "right"

        # Sync token IDs to configs
        for cfg in [self.config.text_config, self.language_model.config, self.generation_config]:
            if cfg is not None:
                cfg.pad_token_id = self.tokenizer.pad_token_id
                cfg.eos_token_id = self.tokenizer.eos_token_id
                cfg.bos_token_id = self.tokenizer.bos_token_id

    def _init_weights(self, module):
        """Weight initialization (projector weights are initialized in MoEAudioProjector)."""
        pass

    def _set_gradient_checkpointing(self, enable: bool = True, gradient_checkpointing_func=None):
        """Enable/disable gradient checkpointing for the language model."""
        # The LLM still stores activations during forward for backprop to projector
        # Gradient checkpointing trades compute for memory by recomputing activations
        if hasattr(self.language_model, "_set_gradient_checkpointing"):
            self.language_model._set_gradient_checkpointing(enable, gradient_checkpointing_func)
        elif hasattr(self.language_model, "gradient_checkpointing_enable") and enable:
            self.language_model.gradient_checkpointing_enable(
                gradient_checkpointing_kwargs={"use_reentrant": False}
            )
        elif hasattr(self.language_model, "gradient_checkpointing_disable") and not enable:
            self.language_model.gradient_checkpointing_disable()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def set_output_embeddings(self, value):
        self.language_model.set_output_embeddings(value)

    def get_processor(self):
        """Get the processor for this model."""
        try:
            from .asr_processing import ASRProcessor
        except ImportError:
            from asr_processing import ASRProcessor  # type: ignore[no-redef]

        return ASRProcessor(
            feature_extractor=self.feature_extractor,
            tokenizer=self.tokenizer,
            projector=self.projector,
            encoder_conv_layers=self.config.encoder_conv_layers,
        )

    def state_dict(self, *args, **kwargs):
        """Only save trainable projector weights."""
        return {f"projector.{k}": v for k, v in self.projector.state_dict().items()}

    def _compute_encoder_output_lengths(
        self,
        audio_attention_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Compute per-sample encoder output lengths using conv layer formulas.

        Args:
            audio_attention_mask: Mask indicating real vs padded mel frames (batch, mel_len)

        Returns:
            Tensor of encoder output lengths per sample (batch,)
        """
        # Get mel frame lengths from attention mask
        lengths = audio_attention_mask.sum(dim=-1)

        # Apply conv layer formulas: output = (input + 2*pad - (kernel-1) - 1) // stride + 1
        for padding, kernel_size, stride in self.config.encoder_conv_layers:
            lengths = (lengths + 2 * padding - (kernel_size - 1) - 1) // stride + 1

        return lengths

    def _encode_audio(
        self,
        audio_features: torch.Tensor,
        audio_attention_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Encode audio and project to LLM embedding space.

        Args:
            audio_features: Mel spectrogram features (batch, n_mels, mel_len)
            audio_attention_mask: Mask indicating real vs padded mel frames (batch, mel_len)

        Returns:
            Flattened audio embeddings of shape (total_audio_tokens, hidden_dim).
        """
        with torch.no_grad():
            encoder_out = self.audio_tower(input_features=audio_features)
            hidden_states = encoder_out.last_hidden_state

        # Compute per-sample encoder output lengths using conv formulas
        encoder_lengths = self._compute_encoder_output_lengths(audio_attention_mask)

        # Project to LLM space
        audio_embeds = self.projector(hidden_states)

        # Compute per-sample projector output lengths
        projector_lengths = torch.tensor(
            [self.projector.get_output_length(int(length.item())) for length in encoder_lengths],
            device=audio_embeds.device,
        )

        # Create valid mask for variable-length samples and extract only real embeddings
        max_len = audio_embeds.shape[1]
        valid_mask = (
            torch.arange(max_len, device=audio_embeds.device)[None, :] < projector_lengths[:, None]
        )
        return audio_embeds[valid_mask]

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_features: Optional[torch.Tensor] = None,
        audio_attention_mask: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        past_key_values: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        """Forward pass for training and inference."""
        # Get text embeddings if not provided
        if inputs_embeds is None:
            inputs_embeds = self.language_model.get_input_embeddings()(input_ids)

        if input_features is not None and input_ids is not None:
            # Apply SpecAugment during training if enabled
            if self.training and getattr(self.config, "use_specaugment", False):
                input_features = apply_specaugment(
                    input_features,
                    mask_time_prob=self.config.mask_time_prob,
                    mask_time_length=self.config.mask_time_length,
                    mask_time_min_masks=self.config.mask_time_min_masks,
                    mask_feature_prob=self.config.mask_feature_prob,
                    mask_feature_length=self.config.mask_feature_length,
                    mask_feature_min_masks=self.config.mask_feature_min_masks,
                )

            # Encode audio -> flattened (total_audio_tokens, hidden_dim)
            audio_embeds = self._encode_audio(input_features, audio_attention_mask)

            # Replace <audio> token placeholders with audio embeddings using masked_scatter
            audio_token_mask = (input_ids == self.audio_token_id).unsqueeze(-1)
            inputs_embeds = inputs_embeds.masked_scatter(
                audio_token_mask.to(inputs_embeds.device),
                audio_embeds.to(inputs_embeds.device, dtype=inputs_embeds.dtype),
            )

        # Run through language model (let it compute loss if labels provided)
        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        # Add auxiliary loss from MoE projectors if available
        if outputs.loss is not None and hasattr(self.projector, "get_aux_loss"):
            aux_loss = self.projector.get_aux_loss()
            if aux_loss is not None and aux_loss.numel() > 0:
                outputs.loss = outputs.loss + aux_loss.to(outputs.loss.device)

        return outputs

    def prepare_inputs_for_generation(self, *args, **kwargs):
        """Prepare inputs for generation, handling audio features for cached decoding."""
        input_features = kwargs.pop("input_features", None)
        cache_position = kwargs.get("cache_position")

        model_inputs = self.language_model.prepare_inputs_for_generation(*args, **kwargs)

        # Only pass audio features on the first generation step (cache_position[0] == 0)
        if cache_position is not None and cache_position[0] == 0 and input_features is not None:
            model_inputs["input_features"] = input_features

        return model_inputs

    def _get_num_audio_tokens(
        self,
        audio_attention_mask: torch.Tensor,
    ) -> int:
        """Calculate number of audio tokens based on actual audio length.

        Uses attention mask to get real audio length, then computes:
        mel_frames -> encoder_frames (via conv formulas) -> projector output tokens
        """
        encoder_lengths = self._compute_encoder_output_lengths(audio_attention_mask)
        # Use max length for batch (all samples should have same token count for generation)
        encoder_output_len = int(encoder_lengths.max().item())
        return int(self.projector.get_output_length(encoder_output_len))

    @torch.no_grad()
    def generate(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_features: Optional[torch.Tensor] = None,
        audio_attention_mask: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        system_prompt: Optional[str] = None,
        **generate_kwargs,
    ) -> torch.Tensor:
        """Generate transcription from audio input.

        Can be called in two ways:
        1. With input_ids containing <audio> tokens (from processor)
        2. With just audio, and we build the prompt internally
        """
        if input_features is None:
            raise ValueError("input_features required for generation")
        if audio_attention_mask is None:
            raise ValueError("audio_attention_mask required for generation")

        device = input_features.device
        batch_size = input_features.shape[0]

        # Encode audio -> flattened embeddings
        audio_embeds = self._encode_audio(input_features, audio_attention_mask)

        # If input_ids not provided, build prompt with correct number of audio tokens
        if input_ids is None:
            num_audio_tokens = self._get_num_audio_tokens(audio_attention_mask)
            audio_placeholder = "<audio>" * num_audio_tokens

            system_prompt = system_prompt or self.system_prompt

            messages: list[dict[str, str]] = []
            if system_prompt:
                messages.append({"role": "system", "content": system_prompt})
            messages.append({"role": "user", "content": self.TRANSCRIBE_PROMPT + audio_placeholder})

            chat_result = self.tokenizer.apply_chat_template(
                messages,
                tokenize=True,
                add_generation_prompt=True,
                return_tensors="pt",
            )
            input_ids = chat_result.input_ids.to(device)

        if input_ids.dim() == 1:
            input_ids = input_ids.unsqueeze(0)
        if input_ids.shape[0] == 1 and batch_size > 1:
            input_ids = input_ids.expand(batch_size, -1)

        attention_mask = torch.ones_like(input_ids)

        # Get text embeddings and replace audio tokens with audio embeddings
        inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
        audio_token_mask = (input_ids == self.audio_token_id).unsqueeze(-1)
        inputs_embeds = inputs_embeds.masked_scatter(
            audio_token_mask.to(inputs_embeds.device),
            audio_embeds.to(inputs_embeds.device, dtype=inputs_embeds.dtype),
        )

        # Generate using language model
        output = self.language_model.generate(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            generation_config=self.generation_config,
            **generate_kwargs,
        )

        # When using inputs_embeds without input_ids, generate returns only new tokens
        if isinstance(output, torch.Tensor):
            return output
        return output.sequences

    def generate_streaming(
        self,
        input_features: torch.Tensor,
        audio_attention_mask: torch.Tensor,
        system_prompt: Optional[str] = None,
        **generate_kwargs,
    ) -> Iterator[str]:
        """Generate transcription with streaming token output.

        Yields partial transcript strings as tokens are generated.
        Reduces time-to-first-word by streaming tokens as they're decoded.

        Args:
            input_features: Mel spectrogram features (batch, n_mels, mel_len)
            audio_attention_mask: Mask for real vs padded mel frames (batch, mel_len)
            system_prompt: Optional system prompt override
            **generate_kwargs: Additional generation arguments

        Yields:
            Partial transcript text as each token is generated
        """
        device = input_features.device
        batch_size = input_features.shape[0]

        # Encode audio -> flattened embeddings
        audio_embeds = self._encode_audio(input_features, audio_attention_mask)

        # Build prompt with correct number of audio tokens
        num_audio_tokens = self._get_num_audio_tokens(audio_attention_mask)
        audio_placeholder = "<audio>" * num_audio_tokens

        system_prompt = system_prompt or self.system_prompt

        messages: list[dict[str, str]] = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": self.TRANSCRIBE_PROMPT + audio_placeholder})

        chat_result = self.tokenizer.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
        )
        input_ids = chat_result.input_ids.to(device)

        if input_ids.dim() == 1:
            input_ids = input_ids.unsqueeze(0)
        if input_ids.shape[0] == 1 and batch_size > 1:
            input_ids = input_ids.expand(batch_size, -1)

        attention_mask = torch.ones_like(input_ids)

        # Get text embeddings and replace audio tokens with audio embeddings
        inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
        audio_token_mask = (input_ids == self.audio_token_id).unsqueeze(-1)
        inputs_embeds = inputs_embeds.masked_scatter(
            audio_token_mask.to(inputs_embeds.device),
            audio_embeds.to(inputs_embeds.device, dtype=inputs_embeds.dtype),
        )

        # Setup streamer for token-by-token output
        streamer = TextIteratorStreamer(
            self.tokenizer,
            skip_prompt=True,
            skip_special_tokens=True,
        )

        # Prepare generation kwargs
        gen_kwargs = {
            "inputs_embeds": inputs_embeds,
            "attention_mask": attention_mask,
            "generation_config": self.generation_config,
            "streamer": streamer,
            **generate_kwargs,
        }

        # Run generation in background thread
        thread = Thread(target=self.language_model.generate, kwargs=gen_kwargs)
        thread.start()

        # Yield tokens as they're generated, filtering out <think>...</think> blocks
        # Start assuming no think block - only filter when we see <think>
        in_think_block = False
        buffer = ""

        for text in streamer:
            buffer += text

            # Check for think block start (in case model outputs think blocks)
            while "<think>" in buffer:
                in_think_block = True
                # Yield any text before <think>
                before_think = buffer.split("<think>")[0]
                if before_think:
                    yield before_think
                buffer = buffer.split("<think>", 1)[-1]

            # Check for think block end
            while in_think_block and "</think>" in buffer:
                in_think_block = False
                buffer = buffer.split("</think>", 1)[-1]

            # Yield text if not in think block
            if not in_think_block and buffer:
                yield buffer
                buffer = ""

        # Yield any remaining buffer
        if buffer and not in_think_block:
            yield buffer

        thread.join()

    def save_pretrained(self, save_directory: Union[str, Path], **kwargs):
        """Save model, tokenizer, and processor."""
        import shutil
        from pathlib import Path as PathlibPath

        save_dir = PathlibPath(save_directory)
        save_dir.mkdir(parents=True, exist_ok=True)

        # Update config with actual vocab size
        self.config.vocab_size = self.language_model.config.vocab_size
        self.config.text_config.vocab_size = self.language_model.config.vocab_size

        if hasattr(self.audio_tower.config, "num_mel_bins"):
            self.config.audio_config.num_mel_bins = self.audio_tower.config.num_mel_bins

        # Save model (temporarily remove non-serializable attributes)
        tokenizer = self.tokenizer
        del self.tokenizer

        try:
            super().save_pretrained(save_dir, **kwargs)
        finally:
            self.tokenizer = tokenizer

        # Save tokenizer and feature extractor
        self.tokenizer.save_pretrained(save_dir)
        self.feature_extractor.save_pretrained(save_dir)

        # Add processor auto_map to preprocessor_config.json
        config_path = save_dir / "preprocessor_config.json"
        if config_path.exists():
            with config_path.open() as f:
                processor_config = json.load(f)
        else:
            processor_config = {}

        processor_config.update(
            {
                "processor_class": "ASRProcessor",
                "auto_map": {"AutoProcessor": "asr_processing.ASRProcessor"},
            }
        )

        with config_path.open("w") as f:
            json.dump(processor_config, f, indent=2)

        # Copy source files for auto-loading
        src_dir = PathlibPath(__file__).parent
        for asr_file in src_dir.glob("asr_*.py"):
            shutil.copy(asr_file, save_dir / asr_file.name)
        # Copy projectors module
        shutil.copy(src_dir / "projectors.py", save_dir / "projectors.py")

    def create_or_update_model_card(self, output_dir: Union[str, Path]):
        """No-op for model card creation - we use MODEL_CARD.md in repo instead."""
        pass


# Register with transformers Auto classes
AutoConfig.register("asr_model", ASRConfig)
AutoModel.register(ASRConfig, ASRModel)
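
Since the config's auto_map points AutoModel at asr_modeling.ASRModel, end-to-end transcription can be sketched as below. The repo id is a placeholder, the waveform is synthetic, and a CUDA device with flash-attn is assumed because the default config requests flash_attention_2; the bundled ASRProcessor (asr_processing.py) is the intended entry point and may differ in detail:

import torch
from transformers import AutoFeatureExtractor, AutoModel

repo_id = "user/asr-model"  # placeholder, not the actual repo name
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True).to("cuda").eval()

# Whisper-style feature extractor: 16 kHz waveform -> log-mel features + frame mask
feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large-v3-turbo")
waveform = torch.randn(16000 * 5).numpy()  # stand-in for 5 seconds of 16 kHz audio
features = feature_extractor(
    waveform, sampling_rate=16000, return_attention_mask=True, return_tensors="pt"
)

# generate() builds the chat prompt with the right number of <audio> tokens internally
token_ids = model.generate(
    input_features=features.input_features.to("cuda", dtype=torch.bfloat16),
    audio_attention_mask=features.attention_mask.to("cuda"),
)
print(model.tokenizer.decode(token_ids[0], skip_special_tokens=True))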
asr_pipeline.py
ADDED
@@ -0,0 +1,519 @@
import re
from pathlib import Path
from typing import Any

import numpy as np
import torch
import transformers

try:
    from .asr_modeling import ASRModel
except ImportError:
    from asr_modeling import ASRModel  # type: ignore[no-redef]


class ForcedAligner:
    """Lazy-loaded forced aligner for word-level timestamps using torchaudio wav2vec2."""

    _bundle = None
    _model = None
    _labels = None
    _dictionary = None

    @classmethod
    def get_instance(cls, device: str = "cuda"):
        if cls._model is None:
            import torchaudio

            cls._bundle = torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H
            cls._model = cls._bundle.get_model().to(device)
            cls._model.eval()
            cls._labels = cls._bundle.get_labels()
            cls._dictionary = {c: i for i, c in enumerate(cls._labels)}
        return cls._model, cls._labels, cls._dictionary

    @classmethod
    def align(
        cls,
        audio: np.ndarray,
        text: str,
        sample_rate: int = 16000,
        language: str = "eng",
        batch_size: int = 16,
    ) -> list[dict]:
        """Align transcript to audio and return word-level timestamps.

        Args:
            audio: Audio waveform as numpy array
            text: Transcript text to align
            sample_rate: Audio sample rate (default 16000)
            language: ISO-639-3 language code (default "eng" for English, unused)
            batch_size: Batch size for alignment model (unused)

        Returns:
            List of dicts with 'word', 'start', 'end' keys
        """
        import torchaudio
        from torchaudio.functional import forced_align, merge_tokens

        device = "cuda" if torch.cuda.is_available() else "cpu"
        model, labels, dictionary = cls.get_instance(device)

        # Convert audio to tensor (copy to ensure array is writable)
        if isinstance(audio, np.ndarray):
            waveform = torch.from_numpy(audio.copy()).float()
        else:
            waveform = audio.clone().float()

        # Ensure 2D (channels, time)
        if waveform.dim() == 1:
            waveform = waveform.unsqueeze(0)

        # Resample if needed (wav2vec2 expects 16kHz)
        if sample_rate != cls._bundle.sample_rate:
            waveform = torchaudio.functional.resample(
                waveform, sample_rate, cls._bundle.sample_rate
            )

        waveform = waveform.to(device)

        # Get emissions from model
        with torch.inference_mode():
            emissions, _ = model(waveform)
            emissions = torch.log_softmax(emissions, dim=-1)

        emission = emissions[0].cpu()

        # Normalize text: uppercase, keep only valid characters
        transcript = text.upper()
        # Build tokens from transcript
        tokens = []
        for char in transcript:
            if char in dictionary:
                tokens.append(dictionary[char])
            elif char == " ":
                tokens.append(dictionary.get("|", dictionary.get(" ", 0)))

        if not tokens:
            return []

        targets = torch.tensor([tokens], dtype=torch.int32)

        # Run forced alignment
        # Note: forced_align is deprecated in torchaudio 2.6+ and will be removed in 2.9 (late 2025)
        # No official replacement announced yet. See https://github.com/pytorch/audio/issues/3902
        aligned_tokens, scores = forced_align(emission.unsqueeze(0), targets, blank=0)

        # Use torchaudio's merge_tokens to get token spans (removes blanks and merges repeats)
        token_spans = merge_tokens(aligned_tokens[0], scores[0])

        # Convert frame indices to time (model stride is 320 samples at 16kHz = 20ms)
        frame_duration = 320 / cls._bundle.sample_rate

        # Group token spans into words based on pipe separator
        words = text.split()
        word_timestamps = []
        current_word_start = None
        current_word_end = None
        word_idx = 0

        for span in token_spans:
            token_char = labels[span.token]
            if token_char == "|":  # Word separator
                if current_word_start is not None and word_idx < len(words):
                    word_timestamps.append(
                        {
                            "word": words[word_idx],
                            "start": current_word_start * frame_duration,
                            "end": current_word_end * frame_duration,
                        }
                    )
                    word_idx += 1
                current_word_start = None
                current_word_end = None
            else:
                if current_word_start is None:
                    current_word_start = span.start
                current_word_end = span.end

        # Don't forget the last word
        if current_word_start is not None and word_idx < len(words):
            word_timestamps.append(
|
| 142 |
+
{
|
| 143 |
+
"word": words[word_idx],
|
| 144 |
+
"start": current_word_start * frame_duration,
|
| 145 |
+
"end": current_word_end * frame_duration,
|
| 146 |
+
}
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
return word_timestamps
|
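The aligner can also be driven on its own, outside the pipeline. A minimal sketch, assuming a mono 16 kHz waveform and that torchaudio and soundfile are installed ("sample.wav" is a placeholder path):

import soundfile as sf

audio, sr = sf.read("sample.wav")  # mono float waveform, placeholder file
words = ForcedAligner.align(audio, "hello world", sample_rate=sr)
for w in words:
    print(f"{w['word']}: {w['start']:.2f}s - {w['end']:.2f}s")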
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class SpeakerDiarizer:
|
| 153 |
+
"""Lazy-loaded speaker diarization using pyannote-audio."""
|
| 154 |
+
|
| 155 |
+
_pipeline = None
|
| 156 |
+
|
| 157 |
+
@classmethod
|
| 158 |
+
def get_instance(cls, hf_token: str | None = None):
|
| 159 |
+
"""Get or create the diarization pipeline.
|
| 160 |
+
|
| 161 |
+
Args:
|
| 162 |
+
hf_token: HuggingFace token with access to pyannote models.
|
| 163 |
+
Can also be set via HF_TOKEN environment variable.
|
| 164 |
+
"""
|
| 165 |
+
if cls._pipeline is None:
|
| 166 |
+
from pyannote.audio import Pipeline
|
| 167 |
+
|
| 168 |
+
cls._pipeline = Pipeline.from_pretrained(
|
| 169 |
+
"pyannote/speaker-diarization-3.1",
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
# Move to GPU if available
|
| 173 |
+
if torch.cuda.is_available():
|
| 174 |
+
cls._pipeline.to(torch.device("cuda"))
|
| 175 |
+
elif torch.backends.mps.is_available():
|
| 176 |
+
cls._pipeline.to(torch.device("mps"))
|
| 177 |
+
|
| 178 |
+
return cls._pipeline
|
| 179 |
+
|
| 180 |
+
@classmethod
|
| 181 |
+
def diarize(
|
| 182 |
+
cls,
|
| 183 |
+
audio: np.ndarray | str,
|
| 184 |
+
sample_rate: int = 16000,
|
| 185 |
+
num_speakers: int | None = None,
|
| 186 |
+
min_speakers: int | None = None,
|
| 187 |
+
max_speakers: int | None = None,
|
| 188 |
+
hf_token: str | None = None,
|
| 189 |
+
) -> list[dict]:
|
| 190 |
+
"""Run speaker diarization on audio.
|
| 191 |
+
|
| 192 |
+
Args:
|
| 193 |
+
audio: Audio waveform as numpy array or path to audio file
|
| 194 |
+
sample_rate: Audio sample rate (default 16000)
|
| 195 |
+
num_speakers: Exact number of speakers (if known)
|
| 196 |
+
min_speakers: Minimum number of speakers
|
| 197 |
+
max_speakers: Maximum number of speakers
|
| 198 |
+
hf_token: HuggingFace token for pyannote models
|
| 199 |
+
|
| 200 |
+
Returns:
|
| 201 |
+
List of dicts with 'speaker', 'start', 'end' keys
|
| 202 |
+
"""
|
| 203 |
+
pipeline = cls.get_instance(hf_token)
|
| 204 |
+
|
| 205 |
+
# Prepare audio input
|
| 206 |
+
if isinstance(audio, np.ndarray):
|
| 207 |
+
# pyannote expects {"waveform": tensor, "sample_rate": int}
|
| 208 |
+
waveform = torch.from_numpy(audio).unsqueeze(0) # Add channel dim
|
| 209 |
+
if waveform.dim() == 1:
|
| 210 |
+
waveform = waveform.unsqueeze(0)
|
| 211 |
+
audio_input = {"waveform": waveform, "sample_rate": sample_rate}
|
| 212 |
+
else:
|
| 213 |
+
# File path
|
| 214 |
+
audio_input = audio
|
| 215 |
+
|
| 216 |
+
# Run diarization
|
| 217 |
+
diarization_args = {}
|
| 218 |
+
if num_speakers is not None:
|
| 219 |
+
diarization_args["num_speakers"] = num_speakers
|
| 220 |
+
if min_speakers is not None:
|
| 221 |
+
diarization_args["min_speakers"] = min_speakers
|
| 222 |
+
if max_speakers is not None:
|
| 223 |
+
diarization_args["max_speakers"] = max_speakers
|
| 224 |
+
|
| 225 |
+
diarization = pipeline(audio_input, **diarization_args)
|
| 226 |
+
|
| 227 |
+
# Handle different pyannote return types
|
| 228 |
+
# pyannote 3.x returns DiarizeOutput dataclass, older versions return Annotation
|
| 229 |
+
if hasattr(diarization, "itertracks"):
|
| 230 |
+
annotation = diarization
|
| 231 |
+
elif hasattr(diarization, "speaker_diarization"):
|
| 232 |
+
# pyannote 3.x DiarizeOutput dataclass
|
| 233 |
+
annotation = diarization.speaker_diarization
|
| 234 |
+
elif isinstance(diarization, tuple):
|
| 235 |
+
# Some versions return (annotation, embeddings) tuple
|
| 236 |
+
annotation = diarization[0]
|
| 237 |
+
else:
|
| 238 |
+
raise TypeError(f"Unexpected diarization output type: {type(diarization)}")
|
| 239 |
+
|
| 240 |
+
# Convert to simple format
|
| 241 |
+
segments = []
|
| 242 |
+
for turn, _, speaker in annotation.itertracks(yield_label=True):
|
| 243 |
+
segments.append(
|
| 244 |
+
{
|
| 245 |
+
"speaker": speaker,
|
| 246 |
+
"start": turn.start,
|
| 247 |
+
"end": turn.end,
|
| 248 |
+
}
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
return segments
|
| 252 |
+
|
| 253 |
+
@classmethod
|
| 254 |
+
def assign_speakers_to_words(
|
| 255 |
+
cls,
|
| 256 |
+
words: list[dict],
|
| 257 |
+
speaker_segments: list[dict],
|
| 258 |
+
) -> list[dict]:
|
| 259 |
+
"""Assign speaker labels to words based on timestamp overlap.
|
| 260 |
+
|
| 261 |
+
Args:
|
| 262 |
+
words: List of word dicts with 'word', 'start', 'end' keys
|
| 263 |
+
speaker_segments: List of speaker dicts with 'speaker', 'start', 'end' keys
|
| 264 |
+
|
| 265 |
+
Returns:
|
| 266 |
+
Words list with 'speaker' key added to each word
|
| 267 |
+
"""
|
| 268 |
+
for word in words:
|
| 269 |
+
word_mid = (word["start"] + word["end"]) / 2
|
| 270 |
+
|
| 271 |
+
# Find the speaker segment that contains this word's midpoint
|
| 272 |
+
best_speaker = None
|
| 273 |
+
for seg in speaker_segments:
|
| 274 |
+
if seg["start"] <= word_mid <= seg["end"]:
|
| 275 |
+
best_speaker = seg["speaker"]
|
| 276 |
+
break
|
| 277 |
+
|
| 278 |
+
# If no exact match, find closest segment
|
| 279 |
+
if best_speaker is None and speaker_segments:
|
| 280 |
+
min_dist = float("inf")
|
| 281 |
+
for seg in speaker_segments:
|
| 282 |
+
seg_mid = (seg["start"] + seg["end"]) / 2
|
| 283 |
+
dist = abs(word_mid - seg_mid)
|
| 284 |
+
if dist < min_dist:
|
| 285 |
+
min_dist = dist
|
| 286 |
+
best_speaker = seg["speaker"]
|
| 287 |
+
|
| 288 |
+
word["speaker"] = best_speaker
|
| 289 |
+
|
| 290 |
+
return words
|
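For reference, a small worked example of the midpoint rule implemented above, on illustrative data only: a word spanning 0.4-0.8 s has midpoint 0.6 s and is attributed to the segment covering that instant.

words = [{"word": "hello", "start": 0.4, "end": 0.8},
         {"word": "there", "start": 2.1, "end": 2.5}]
segments = [{"speaker": "SPEAKER_00", "start": 0.0, "end": 1.5},
            {"speaker": "SPEAKER_01", "start": 1.5, "end": 3.0}]
labeled = SpeakerDiarizer.assign_speakers_to_words(words, segments)
# "hello" -> SPEAKER_00 (midpoint 0.6 s), "there" -> SPEAKER_01 (midpoint 2.3 s)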
| 291 |
+
|
| 292 |
+
|
| 293 |
+
class ASRPipeline(transformers.AutomaticSpeechRecognitionPipeline):
|
| 294 |
+
"""ASR Pipeline for audio-to-text transcription."""
|
| 295 |
+
|
| 296 |
+
model: ASRModel
|
| 297 |
+
|
| 298 |
+
def __init__(self, model: ASRModel, **kwargs):
|
| 299 |
+
feature_extractor = kwargs.pop("feature_extractor", None)
|
| 300 |
+
tokenizer = kwargs.pop("tokenizer", model.tokenizer)
|
| 301 |
+
|
| 302 |
+
if feature_extractor is None:
|
| 303 |
+
feature_extractor = model.get_processor().feature_extractor
|
| 304 |
+
|
| 305 |
+
super().__init__(
|
| 306 |
+
model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, **kwargs
|
| 307 |
+
)
|
| 308 |
+
self._current_audio = None
|
| 309 |
+
|
| 310 |
+
def _sanitize_parameters(self, **kwargs):
|
| 311 |
+
"""Intercept our custom parameters before parent class validates them."""
|
| 312 |
+
# Remove our custom parameters so parent doesn't see them
|
| 313 |
+
kwargs.pop("return_timestamps", None)
|
| 314 |
+
kwargs.pop("return_speakers", None)
|
| 315 |
+
kwargs.pop("num_speakers", None)
|
| 316 |
+
kwargs.pop("min_speakers", None)
|
| 317 |
+
kwargs.pop("max_speakers", None)
|
| 318 |
+
kwargs.pop("hf_token", None)
|
| 319 |
+
|
| 320 |
+
return super()._sanitize_parameters(**kwargs)
|
| 321 |
+
|
| 322 |
+
def __call__(
|
| 323 |
+
self,
|
| 324 |
+
inputs,
|
| 325 |
+
**kwargs,
|
| 326 |
+
):
|
| 327 |
+
"""Transcribe audio with optional word-level timestamps and speaker diarization.
|
| 328 |
+
|
| 329 |
+
Args:
|
| 330 |
+
inputs: Audio input (file path, dict with array/sampling_rate, etc.)
|
| 331 |
+
return_timestamps: If True, return word-level timestamps using forced alignment
|
| 332 |
+
return_speakers: If True, return speaker labels for each word
|
| 333 |
+
num_speakers: Exact number of speakers (if known, for diarization)
|
| 334 |
+
min_speakers: Minimum number of speakers (for diarization)
|
| 335 |
+
max_speakers: Maximum number of speakers (for diarization)
|
| 336 |
+
hf_token: HuggingFace token for pyannote models (or set HF_TOKEN env var)
|
| 337 |
+
**kwargs: Additional arguments passed to the pipeline
|
| 338 |
+
|
| 339 |
+
Returns:
|
| 340 |
+
Dict with 'text' key, 'words' key if return_timestamps=True,
|
| 341 |
+
and speaker labels on words if return_speakers=True
|
| 342 |
+
"""
|
| 343 |
+
# Extract our params before super().__call__ (which will also call _sanitize_parameters)
|
| 344 |
+
return_timestamps = kwargs.pop("return_timestamps", False)
|
| 345 |
+
return_speakers = kwargs.pop("return_speakers", False)
|
| 346 |
+
diarization_params = {
|
| 347 |
+
"num_speakers": kwargs.pop("num_speakers", None),
|
| 348 |
+
"min_speakers": kwargs.pop("min_speakers", None),
|
| 349 |
+
"max_speakers": kwargs.pop("max_speakers", None),
|
| 350 |
+
"hf_token": kwargs.pop("hf_token", None),
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
if return_speakers:
|
| 354 |
+
return_timestamps = True
|
| 355 |
+
|
| 356 |
+
# Store audio for timestamp alignment and diarization
|
| 357 |
+
if return_timestamps or return_speakers:
|
| 358 |
+
self._current_audio = self._extract_audio(inputs)
|
| 359 |
+
|
| 360 |
+
# Run standard transcription
|
| 361 |
+
result = super().__call__(inputs, **kwargs)
|
| 362 |
+
|
| 363 |
+
# Add timestamps if requested
|
| 364 |
+
if return_timestamps and self._current_audio is not None:
|
| 365 |
+
text = result.get("text", "")
|
| 366 |
+
if text:
|
| 367 |
+
try:
|
| 368 |
+
words = ForcedAligner.align(
|
| 369 |
+
self._current_audio["array"],
|
| 370 |
+
text,
|
| 371 |
+
sample_rate=self._current_audio.get("sampling_rate", 16000),
|
| 372 |
+
)
|
| 373 |
+
result["words"] = words
|
| 374 |
+
except Exception as e:
|
| 375 |
+
result["words"] = []
|
| 376 |
+
result["timestamp_error"] = str(e)
|
| 377 |
+
else:
|
| 378 |
+
result["words"] = []
|
| 379 |
+
|
| 380 |
+
# Add speaker diarization if requested
|
| 381 |
+
if return_speakers and self._current_audio is not None:
|
| 382 |
+
try:
|
| 383 |
+
# Run diarization
|
| 384 |
+
speaker_segments = SpeakerDiarizer.diarize(
|
| 385 |
+
self._current_audio["array"],
|
| 386 |
+
sample_rate=self._current_audio.get("sampling_rate", 16000),
|
| 387 |
+
**{k: v for k, v in diarization_params.items() if v is not None},
|
| 388 |
+
)
|
| 389 |
+
result["speaker_segments"] = speaker_segments
|
| 390 |
+
|
| 391 |
+
# Assign speakers to words
|
| 392 |
+
if result.get("words"):
|
| 393 |
+
result["words"] = SpeakerDiarizer.assign_speakers_to_words(
|
| 394 |
+
result["words"],
|
| 395 |
+
speaker_segments,
|
| 396 |
+
)
|
| 397 |
+
except Exception as e:
|
| 398 |
+
result["speaker_segments"] = []
|
| 399 |
+
result["diarization_error"] = str(e)
|
| 400 |
+
|
| 401 |
+
# Clean up
|
| 402 |
+
self._current_audio = None
|
| 403 |
+
|
| 404 |
+
return result
|
| 405 |
+
|
| 406 |
+
def _extract_audio(self, inputs) -> dict | None:
|
| 407 |
+
"""Extract audio array from various input formats using HF utilities."""
|
| 408 |
+
from transformers.pipelines.audio_utils import ffmpeg_read
|
| 409 |
+
|
| 410 |
+
if isinstance(inputs, dict):
|
| 411 |
+
if "array" in inputs:
|
| 412 |
+
return {
|
| 413 |
+
"array": inputs["array"],
|
| 414 |
+
"sampling_rate": inputs.get("sampling_rate", 16000),
|
| 415 |
+
}
|
| 416 |
+
if "raw" in inputs:
|
| 417 |
+
return {
|
| 418 |
+
"array": inputs["raw"],
|
| 419 |
+
"sampling_rate": inputs.get("sampling_rate", 16000),
|
| 420 |
+
}
|
| 421 |
+
elif isinstance(inputs, str):
|
| 422 |
+
# File path - load audio using ffmpeg (same as HF pipeline)
|
| 423 |
+
with Path(inputs).open("rb") as f:
|
| 424 |
+
audio = ffmpeg_read(f.read(), sampling_rate=16000)
|
| 425 |
+
return {"array": audio, "sampling_rate": 16000}
|
| 426 |
+
elif isinstance(inputs, bytes):
|
| 427 |
+
audio = ffmpeg_read(inputs, sampling_rate=16000)
|
| 428 |
+
return {"array": audio, "sampling_rate": 16000}
|
| 429 |
+
elif isinstance(inputs, np.ndarray):
|
| 430 |
+
return {"array": inputs, "sampling_rate": 16000}
|
| 431 |
+
|
| 432 |
+
return None
|
| 433 |
+
|
| 434 |
+
def preprocess(self, inputs, **preprocess_params):
|
| 435 |
+
# Handle dict with "array" key (from datasets)
|
| 436 |
+
if isinstance(inputs, dict) and "array" in inputs:
|
| 437 |
+
inputs = {
|
| 438 |
+
"raw": inputs["array"],
|
| 439 |
+
"sampling_rate": inputs.get("sampling_rate", self.feature_extractor.sampling_rate),
|
| 440 |
+
}
|
| 441 |
+
|
| 442 |
+
for item in super().preprocess(inputs, **preprocess_params):
|
| 443 |
+
if "is_last" not in item:
|
| 444 |
+
item["is_last"] = True
|
| 445 |
+
yield item
|
| 446 |
+
|
| 447 |
+
def _forward(self, model_inputs, **generate_kwargs) -> dict[str, Any]:
|
| 448 |
+
# Extract audio features and is_last flag
|
| 449 |
+
is_last = model_inputs.pop("is_last", True) if isinstance(model_inputs, dict) else True
|
| 450 |
+
|
| 451 |
+
input_features = model_inputs["input_features"].to(self.model.device)
|
| 452 |
+
audio_attention_mask = model_inputs["attention_mask"].to(self.model.device)
|
| 453 |
+
|
| 454 |
+
generated_ids = self.model.generate(
|
| 455 |
+
input_features=input_features,
|
| 456 |
+
audio_attention_mask=audio_attention_mask,
|
| 457 |
+
**generate_kwargs,
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
return {"tokens": generated_ids, "is_last": is_last}
|
| 461 |
+
|
| 462 |
+
def postprocess(self, model_outputs, **kwargs) -> dict[str, str]:
|
| 463 |
+
# Handle list of outputs (from chunking)
|
| 464 |
+
if isinstance(model_outputs, list):
|
| 465 |
+
model_outputs = model_outputs[0] if model_outputs else {}
|
| 466 |
+
|
| 467 |
+
tokens = model_outputs.get("tokens")
|
| 468 |
+
if tokens is None:
|
| 469 |
+
return super().postprocess(model_outputs, **kwargs)
|
| 470 |
+
|
| 471 |
+
if torch.is_tensor(tokens):
|
| 472 |
+
tokens = tokens.cpu()
|
| 473 |
+
if tokens.dim() > 1:
|
| 474 |
+
tokens = tokens[0]
|
| 475 |
+
|
| 476 |
+
text = self.tokenizer.decode(tokens, skip_special_tokens=True).strip()
|
| 477 |
+
# Strip <think>...</think> tags (Qwen3 doesn't respect /no_think prompt)
|
| 478 |
+
text = re.sub(r"<think>.*?</think>\s*", "", text, flags=re.DOTALL).strip()
|
| 479 |
+
# Post-process prediction
|
| 480 |
+
text = self._post_process_prediction(text)
|
| 481 |
+
return {"text": text}
|
| 482 |
+
|
| 483 |
+
def _post_process_prediction(self, text: str) -> str:
|
| 484 |
+
"""Post-process model output to fix common issues."""
|
| 485 |
+
if not text:
|
| 486 |
+
return ""
|
| 487 |
+
|
| 488 |
+
original_len = len(text.split())
|
| 489 |
+
|
| 490 |
+
# 1. LOWERCASE
|
| 491 |
+
text = text.lower()
|
| 492 |
+
|
| 493 |
+
# 2. REMOVE REPETITIVE LOOPS
|
| 494 |
+
# If the model repeats the same phrase, keep only one instance.
|
| 495 |
+
words = text.split()
|
| 496 |
+
for n in range(1, min(15, len(words) // 2 + 1)):
|
| 497 |
+
last_sequence = words[-n:]
|
| 498 |
+
repeat_count = 0
|
| 499 |
+
idx = len(words) - n
|
| 500 |
+
while idx >= n and words[idx - n : idx] == last_sequence:
|
| 501 |
+
repeat_count += 1
|
| 502 |
+
idx -= n
|
| 503 |
+
|
| 504 |
+
if repeat_count >= 1:
|
| 505 |
+
words = words[: idx + n]
|
| 506 |
+
text = " ".join(words)
|
| 507 |
+
print(f"[DEBUG] Truncated repetition: {original_len} -> {len(words)} words (n={n}, repeats={repeat_count})")
|
| 508 |
+
break
|
| 509 |
+
|
| 510 |
+
# 3. COMBINE ACRONYMS
|
| 511 |
+
# Merge consecutive single letters into one word (e.g., "u s a" -> "usa")
|
| 512 |
+
text = re.sub(r"\b([a-z])((?:\s+[a-z])+)\b", lambda m: m.group(0).replace(" ", ""), text)
|
| 513 |
+
|
| 514 |
+
# 4. NORMALIZE CURRENCY
|
| 515 |
+
# Convert "eur X" to "X euros" for Whisper normalizer compatibility
|
| 516 |
+
text = re.sub(r"\beur\s+(\d+)", r"\1 euros", text)
|
| 517 |
+
|
| 518 |
+
# 5. STRIP WHITESPACE
|
| 519 |
+
return re.sub(r"\s+", " ", text).strip()
|
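A hedged end-to-end sketch of how this pipeline is meant to be used once the folder is on the Hub; the repo id and audio path are placeholders, and return_speakers additionally needs access to the pyannote models (HF_TOKEN or hf_token=...):

from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="your-org/your-asr-model",  # placeholder; resolves ASRPipeline via custom_pipelines
    trust_remote_code=True,
)
out = asr("meeting.wav", return_timestamps=True, return_speakers=True)
print(out["text"])
for w in out.get("words", []):
    print(w["speaker"], f"{w['start']:.2f}-{w['end']:.2f}", w["word"])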
asr_processing.py
ADDED
|
@@ -0,0 +1,121 @@
|
| 1 |
+
from typing import Optional, Union
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import transformers
|
| 5 |
+
from transformers import ProcessorMixin
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
from .asr_config import ASRConfig
|
| 9 |
+
except ImportError:
|
| 10 |
+
from asr_config import ASRConfig # type: ignore[no-redef]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ASRProcessor(ProcessorMixin):
|
| 14 |
+
"""Processor for Whisper-based ASR models."""
|
| 15 |
+
|
| 16 |
+
attributes = ["feature_extractor", "tokenizer"]
|
| 17 |
+
feature_extractor_class = "AutoFeatureExtractor"
|
| 18 |
+
tokenizer_class = "AutoTokenizer"
|
| 19 |
+
AUDIO_TOKEN = "<audio>"
|
| 20 |
+
TRANSCRIBE_PROMPT = "Transcribe: "
|
| 21 |
+
# Default conv layers for Whisper/GLM-ASR: [(pad, kernel, stride), ...]
|
| 22 |
+
DEFAULT_ENCODER_CONV_LAYERS = [(1, 3, 1), (1, 3, 2)]
|
| 23 |
+
|
| 24 |
+
def __init__(
|
| 25 |
+
self,
|
| 26 |
+
feature_extractor,
|
| 27 |
+
tokenizer,
|
| 28 |
+
projector=None,
|
| 29 |
+
encoder_conv_layers: Optional[list] = None,
|
| 30 |
+
):
|
| 31 |
+
self.feature_extractor = feature_extractor
|
| 32 |
+
self.tokenizer = tokenizer
|
| 33 |
+
self.audio_token_id = tokenizer.convert_tokens_to_ids(self.AUDIO_TOKEN)
|
| 34 |
+
self.projector = projector
|
| 35 |
+
self.encoder_conv_layers = encoder_conv_layers or self.DEFAULT_ENCODER_CONV_LAYERS
|
| 36 |
+
|
| 37 |
+
def _compute_encoder_output_length(self, mel_length: int) -> int:
|
| 38 |
+
"""Compute encoder output length using conv layer formulas."""
|
| 39 |
+
length = mel_length
|
| 40 |
+
for padding, kernel_size, stride in self.encoder_conv_layers:
|
| 41 |
+
length = (length + 2 * padding - (kernel_size - 1) - 1) // stride + 1
|
| 42 |
+
return length
|
| 43 |
+
|
| 44 |
+
def __call__(
|
| 45 |
+
self,
|
| 46 |
+
audio: Optional[Union[list, "torch.Tensor"]] = None,
|
| 47 |
+
text: Optional[str] = None,
|
| 48 |
+
system_prompt: Optional[str] = None,
|
| 49 |
+
return_tensors: str = "pt",
|
| 50 |
+
**kwargs,
|
| 51 |
+
) -> dict:
|
| 52 |
+
"""Process audio and text inputs for inference.
|
| 53 |
+
|
| 54 |
+
Args:
|
| 55 |
+
audio: Raw audio waveform(s)
|
| 56 |
+
text: Target transcription (optional, for training - but use DataCollator instead)
|
| 57 |
+
system_prompt: Optional system prompt
|
| 58 |
+
return_tensors: Return format ("pt" for PyTorch)
|
| 59 |
+
|
| 60 |
+
Returns:
|
| 61 |
+
Dict with input_features, input_ids, attention_mask
|
| 62 |
+
"""
|
| 63 |
+
result = {}
|
| 64 |
+
|
| 65 |
+
# Process audio
|
| 66 |
+
if audio is not None:
|
| 67 |
+
audio_inputs = self.feature_extractor(
|
| 68 |
+
audio,
|
| 69 |
+
sampling_rate=getattr(self.feature_extractor, "sampling_rate", 16000),
|
| 70 |
+
return_attention_mask=True,
|
| 71 |
+
return_tensors=return_tensors,
|
| 72 |
+
**kwargs,
|
| 73 |
+
)
|
| 74 |
+
result["input_features"] = audio_inputs["input_features"]
|
| 75 |
+
result["audio_attention_mask"] = audio_inputs["attention_mask"]
|
| 76 |
+
|
| 77 |
+
# Use actual audio length (from attention mask) for token count
|
| 78 |
+
real_mel_len = int(audio_inputs["attention_mask"].sum(dim=-1).max().item())
|
| 79 |
+
encoder_output_len = self._compute_encoder_output_length(real_mel_len)
|
| 80 |
+
num_audio_tokens = self.projector.get_output_length(encoder_output_len)
|
| 81 |
+
else:
|
| 82 |
+
num_audio_tokens = 0
|
| 83 |
+
|
| 84 |
+
# Build prompt with audio token placeholders
|
| 85 |
+
user_content = self.TRANSCRIBE_PROMPT
|
| 86 |
+
if num_audio_tokens > 0:
|
| 87 |
+
user_content += self.AUDIO_TOKEN * num_audio_tokens
|
| 88 |
+
|
| 89 |
+
messages = []
|
| 90 |
+
if system_prompt:
|
| 91 |
+
messages.append({"role": "system", "content": system_prompt})
|
| 92 |
+
messages.append({"role": "user", "content": user_content})
|
| 93 |
+
if text is not None:
|
| 94 |
+
messages.append({"role": "assistant", "content": text})
|
| 95 |
+
|
| 96 |
+
# Tokenize
|
| 97 |
+
tokenized = self.tokenizer.apply_chat_template(
|
| 98 |
+
messages,
|
| 99 |
+
tokenize=True,
|
| 100 |
+
add_generation_prompt=(text is None),
|
| 101 |
+
return_tensors=return_tensors,
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
# Handle both tensor and BatchEncoding returns
|
| 105 |
+
if isinstance(tokenized, torch.Tensor):
|
| 106 |
+
input_ids = tokenized
|
| 107 |
+
else:
|
| 108 |
+
# BatchEncoding or dict-like object
|
| 109 |
+
input_ids = tokenized.get("input_ids", tokenized.input_ids)
|
| 110 |
+
|
| 111 |
+
if input_ids.dim() == 1:
|
| 112 |
+
input_ids = input_ids.unsqueeze(0)
|
| 113 |
+
|
| 114 |
+
result["input_ids"] = input_ids
|
| 115 |
+
result["attention_mask"] = torch.ones_like(input_ids)
|
| 116 |
+
|
| 117 |
+
return result
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
ASRProcessor.register_for_auto_class()
|
| 121 |
+
transformers.AutoProcessor.register(ASRConfig, ASRProcessor)
|
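As a sanity check on _compute_encoder_output_length, the default conv stack maps a full 30 s clip (3000 mel frames) to 1500 encoder frames; with an MLP projector and pool stride 4 that becomes 375 <audio> placeholder tokens. A standalone restatement of the same formula:

def conv_out_len(length, layers=((1, 3, 1), (1, 3, 2))):
    # same arithmetic as ASRProcessor._compute_encoder_output_length
    for padding, kernel_size, stride in layers:
        length = (length + 2 * padding - (kernel_size - 1) - 1) // stride + 1
    return length

enc_len = conv_out_len(3000)       # -> 1500 encoder frames for a 30 s clip
num_audio_tokens = enc_len // 4    # -> 375 with projector_pool_stride = 4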
chat_template.jinja
ADDED
|
@@ -0,0 +1,89 @@
|
| 1 |
+
{%- if tools %}
|
| 2 |
+
{{- '<|im_start|>system\n' }}
|
| 3 |
+
{%- if messages[0].role == 'system' %}
|
| 4 |
+
{{- messages[0].content + '\n\n' }}
|
| 5 |
+
{%- endif %}
|
| 6 |
+
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
|
| 7 |
+
{%- for tool in tools %}
|
| 8 |
+
{{- "\n" }}
|
| 9 |
+
{{- tool | tojson }}
|
| 10 |
+
{%- endfor %}
|
| 11 |
+
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
|
| 12 |
+
{%- else %}
|
| 13 |
+
{%- if messages[0].role == 'system' %}
|
| 14 |
+
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
|
| 15 |
+
{%- endif %}
|
| 16 |
+
{%- endif %}
|
| 17 |
+
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
|
| 18 |
+
{%- for message in messages[::-1] %}
|
| 19 |
+
{%- set index = (messages|length - 1) - loop.index0 %}
|
| 20 |
+
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
|
| 21 |
+
{%- set ns.multi_step_tool = false %}
|
| 22 |
+
{%- set ns.last_query_index = index %}
|
| 23 |
+
{%- endif %}
|
| 24 |
+
{%- endfor %}
|
| 25 |
+
{%- for message in messages %}
|
| 26 |
+
{%- if message.content is string %}
|
| 27 |
+
{%- set content = message.content %}
|
| 28 |
+
{%- else %}
|
| 29 |
+
{%- set content = '' %}
|
| 30 |
+
{%- endif %}
|
| 31 |
+
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
|
| 32 |
+
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
|
| 33 |
+
{%- elif message.role == "assistant" %}
|
| 34 |
+
{%- set reasoning_content = '' %}
|
| 35 |
+
{%- if message.reasoning_content is string %}
|
| 36 |
+
{%- set reasoning_content = message.reasoning_content %}
|
| 37 |
+
{%- else %}
|
| 38 |
+
{%- if '</think>' in content %}
|
| 39 |
+
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
|
| 40 |
+
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
|
| 41 |
+
{%- endif %}
|
| 42 |
+
{%- endif %}
|
| 43 |
+
{%- if loop.index0 > ns.last_query_index %}
|
| 44 |
+
{%- if loop.last or (not loop.last and reasoning_content) %}
|
| 45 |
+
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
|
| 46 |
+
{%- else %}
|
| 47 |
+
{{- '<|im_start|>' + message.role + '\n' + content }}
|
| 48 |
+
{%- endif %}
|
| 49 |
+
{%- else %}
|
| 50 |
+
{{- '<|im_start|>' + message.role + '\n' + content }}
|
| 51 |
+
{%- endif %}
|
| 52 |
+
{%- if message.tool_calls %}
|
| 53 |
+
{%- for tool_call in message.tool_calls %}
|
| 54 |
+
{%- if (loop.first and content) or (not loop.first) %}
|
| 55 |
+
{{- '\n' }}
|
| 56 |
+
{%- endif %}
|
| 57 |
+
{%- if tool_call.function %}
|
| 58 |
+
{%- set tool_call = tool_call.function %}
|
| 59 |
+
{%- endif %}
|
| 60 |
+
{{- '<tool_call>\n{"name": "' }}
|
| 61 |
+
{{- tool_call.name }}
|
| 62 |
+
{{- '", "arguments": ' }}
|
| 63 |
+
{%- if tool_call.arguments is string %}
|
| 64 |
+
{{- tool_call.arguments }}
|
| 65 |
+
{%- else %}
|
| 66 |
+
{{- tool_call.arguments | tojson }}
|
| 67 |
+
{%- endif %}
|
| 68 |
+
{{- '}\n</tool_call>' }}
|
| 69 |
+
{%- endfor %}
|
| 70 |
+
{%- endif %}
|
| 71 |
+
{{- '<|im_end|>\n' }}
|
| 72 |
+
{%- elif message.role == "tool" %}
|
| 73 |
+
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
|
| 74 |
+
{{- '<|im_start|>user' }}
|
| 75 |
+
{%- endif %}
|
| 76 |
+
{{- '\n<tool_response>\n' }}
|
| 77 |
+
{{- content }}
|
| 78 |
+
{{- '\n</tool_response>' }}
|
| 79 |
+
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
|
| 80 |
+
{{- '<|im_end|>\n' }}
|
| 81 |
+
{%- endif %}
|
| 82 |
+
{%- endif %}
|
| 83 |
+
{%- endfor %}
|
| 84 |
+
{%- if add_generation_prompt %}
|
| 85 |
+
{{- '<|im_start|>assistant\n' }}
|
| 86 |
+
{%- if enable_thinking is defined and enable_thinking is false %}
|
| 87 |
+
{{- '<think>\n\n</think>\n\n' }}
|
| 88 |
+
{%- endif %}
|
| 89 |
+
{%- endif %}
|
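To see what this template produces for a transcription turn, it can be rendered directly through the tokenizer; a sketch with a placeholder repo id and a shortened <audio> run (the real prompt repeats the token once per audio embedding):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-asr-model")  # placeholder
messages = [
    {"role": "system", "content": "/no_think /system_override"},
    {"role": "user", "content": "Transcribe: " + "<audio>" * 8},
]
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
)
# prompt starts with "<|im_start|>system\n/no_think /system_override<|im_end|>\n"
# and ends with "<|im_start|>assistant\n<think>\n\n</think>\n\n"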
config.json
ADDED
|
@@ -0,0 +1,197 @@
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"ASRModel"
|
| 4 |
+
],
|
| 5 |
+
"attn_implementation": "sdpa",
|
| 6 |
+
"audio_config": {
|
| 7 |
+
"_name_or_path": "openai/whisper-large-v2",
|
| 8 |
+
"activation_dropout": 0.0,
|
| 9 |
+
"activation_function": "gelu",
|
| 10 |
+
"apply_spec_augment": false,
|
| 11 |
+
"architectures": [
|
| 12 |
+
"WhisperForConditionalGeneration"
|
| 13 |
+
],
|
| 14 |
+
"attention_dropout": 0.0,
|
| 15 |
+
"bos_token_id": 50257,
|
| 16 |
+
"classifier_proj_size": 256,
|
| 17 |
+
"d_model": 1280,
|
| 18 |
+
"decoder_attention_heads": 20,
|
| 19 |
+
"decoder_ffn_dim": 5120,
|
| 20 |
+
"decoder_layerdrop": 0.0,
|
| 21 |
+
"decoder_layers": 32,
|
| 22 |
+
"decoder_start_token_id": 50258,
|
| 23 |
+
"dropout": 0.0,
|
| 24 |
+
"dtype": "bfloat16",
|
| 25 |
+
"encoder_attention_heads": 20,
|
| 26 |
+
"encoder_ffn_dim": 5120,
|
| 27 |
+
"encoder_layerdrop": 0.0,
|
| 28 |
+
"encoder_layers": 32,
|
| 29 |
+
"eos_token_id": 50257,
|
| 30 |
+
"forced_decoder_ids": [
|
| 31 |
+
[
|
| 32 |
+
1,
|
| 33 |
+
50259
|
| 34 |
+
],
|
| 35 |
+
[
|
| 36 |
+
2,
|
| 37 |
+
50359
|
| 38 |
+
],
|
| 39 |
+
[
|
| 40 |
+
3,
|
| 41 |
+
50363
|
| 42 |
+
]
|
| 43 |
+
],
|
| 44 |
+
"init_std": 0.02,
|
| 45 |
+
"mask_feature_length": 10,
|
| 46 |
+
"mask_feature_min_masks": 0,
|
| 47 |
+
"mask_feature_prob": 0.0,
|
| 48 |
+
"mask_time_length": 10,
|
| 49 |
+
"mask_time_min_masks": 2,
|
| 50 |
+
"mask_time_prob": 0.05,
|
| 51 |
+
"max_source_positions": 1500,
|
| 52 |
+
"max_target_positions": 448,
|
| 53 |
+
"median_filter_width": 7,
|
| 54 |
+
"model_type": "whisper",
|
| 55 |
+
"num_hidden_layers": 32,
|
| 56 |
+
"num_mel_bins": 80,
|
| 57 |
+
"pad_token_id": 50257,
|
| 58 |
+
"scale_embedding": false,
|
| 59 |
+
"use_cache": true,
|
| 60 |
+
"use_weighted_layer_sum": false,
|
| 61 |
+
"vocab_size": 51865
|
| 62 |
+
},
|
| 63 |
+
"audio_model_id": "openai/whisper-large-v2",
|
| 64 |
+
"audio_sample_rate": 16000,
|
| 65 |
+
"auto_map": {
|
| 66 |
+
"AutoConfig": "asr_config.ASRConfig",
|
| 67 |
+
"AutoModel": "asr_modeling.ASRModel",
|
| 68 |
+
"AutoModelForSpeechSeq2Seq": "asr_modeling.ASRModel",
|
| 69 |
+
"AutoProcessor": "asr_processing.ASRProcessor"
|
| 70 |
+
},
|
| 71 |
+
"custom_pipelines": {
|
| 72 |
+
"automatic-speech-recognition": {
|
| 73 |
+
"impl": "asr_pipeline.ASRPipeline",
|
| 74 |
+
"pt": [
|
| 75 |
+
"AutoModelForSpeechSeq2Seq"
|
| 76 |
+
],
|
| 77 |
+
"tf": [],
|
| 78 |
+
"type": "audio"
|
| 79 |
+
}
|
| 80 |
+
},
|
| 81 |
+
"downsample_rate": 5,
|
| 82 |
+
"dtype": "bfloat16",
|
| 83 |
+
"encoder_conv_layers": [
|
| 84 |
+
[
|
| 85 |
+
1,
|
| 86 |
+
3,
|
| 87 |
+
1
|
| 88 |
+
],
|
| 89 |
+
[
|
| 90 |
+
1,
|
| 91 |
+
3,
|
| 92 |
+
2
|
| 93 |
+
]
|
| 94 |
+
],
|
| 95 |
+
"encoder_dim": 1280,
|
| 96 |
+
"inference_warmup_tokens": 10,
|
| 97 |
+
"label_smoothing": 0.0,
|
| 98 |
+
"length_penalty": 1.0,
|
| 99 |
+
"llm_dim": 1024,
|
| 100 |
+
"mask_feature_length": 10,
|
| 101 |
+
"mask_feature_min_masks": 0,
|
| 102 |
+
"mask_feature_prob": 0.0,
|
| 103 |
+
"mask_time_length": 10,
|
| 104 |
+
"mask_time_min_masks": 2,
|
| 105 |
+
"mask_time_prob": 0.05,
|
| 106 |
+
"max_new_tokens": 256,
|
| 107 |
+
"min_new_tokens": 0,
|
| 108 |
+
"model_dtype": "bfloat16",
|
| 109 |
+
"model_type": "asr_model",
|
| 110 |
+
"no_repeat_ngram_size": 0,
|
| 111 |
+
"num_beams": 1,
|
| 112 |
+
"num_experts": 4,
|
| 113 |
+
"num_experts_per_tok": 2,
|
| 114 |
+
"pipeline_tag": "automatic-speech-recognition",
|
| 115 |
+
"projector_dropout": 0.0,
|
| 116 |
+
"projector_hidden_dim": null,
|
| 117 |
+
"projector_init_std": 0.02,
|
| 118 |
+
"projector_num_layers": 2,
|
| 119 |
+
"projector_pool_stride": 4,
|
| 120 |
+
"projector_type": "mlp",
|
| 121 |
+
"qformer_hidden_size": null,
|
| 122 |
+
"qformer_intermediate_size": null,
|
| 123 |
+
"qformer_num_heads": 16,
|
| 124 |
+
"qformer_num_layers": 2,
|
| 125 |
+
"qformer_window_size": 15,
|
| 126 |
+
"repetition_penalty": 1.0,
|
| 127 |
+
"router_aux_loss_coef": 0.01,
|
| 128 |
+
"system_prompt": "/no_think /system_override",
|
| 129 |
+
"text_config": {
|
| 130 |
+
"_name_or_path": "Qwen/Qwen3-0.6B",
|
| 131 |
+
"architectures": [
|
| 132 |
+
"Qwen3ForCausalLM"
|
| 133 |
+
],
|
| 134 |
+
"attention_bias": false,
|
| 135 |
+
"attention_dropout": 0.0,
|
| 136 |
+
"dtype": "bfloat16",
|
| 137 |
+
"eos_token_id": 151645,
|
| 138 |
+
"head_dim": 128,
|
| 139 |
+
"hidden_act": "silu",
|
| 140 |
+
"hidden_size": 1024,
|
| 141 |
+
"initializer_range": 0.02,
|
| 142 |
+
"intermediate_size": 3072,
|
| 143 |
+
"layer_types": [
|
| 144 |
+
"full_attention",
|
| 145 |
+
"full_attention",
|
| 146 |
+
"full_attention",
|
| 147 |
+
"full_attention",
|
| 148 |
+
"full_attention",
|
| 149 |
+
"full_attention",
|
| 150 |
+
"full_attention",
|
| 151 |
+
"full_attention",
|
| 152 |
+
"full_attention",
|
| 153 |
+
"full_attention",
|
| 154 |
+
"full_attention",
|
| 155 |
+
"full_attention",
|
| 156 |
+
"full_attention",
|
| 157 |
+
"full_attention",
|
| 158 |
+
"full_attention",
|
| 159 |
+
"full_attention",
|
| 160 |
+
"full_attention",
|
| 161 |
+
"full_attention",
|
| 162 |
+
"full_attention",
|
| 163 |
+
"full_attention",
|
| 164 |
+
"full_attention",
|
| 165 |
+
"full_attention",
|
| 166 |
+
"full_attention",
|
| 167 |
+
"full_attention",
|
| 168 |
+
"full_attention",
|
| 169 |
+
"full_attention",
|
| 170 |
+
"full_attention",
|
| 171 |
+
"full_attention"
|
| 172 |
+
],
|
| 173 |
+
"max_position_embeddings": 40960,
|
| 174 |
+
"max_window_layers": 28,
|
| 175 |
+
"model_type": "qwen3",
|
| 176 |
+
"num_attention_heads": 16,
|
| 177 |
+
"num_hidden_layers": 28,
|
| 178 |
+
"num_key_value_heads": 8,
|
| 179 |
+
"pad_token_id": 151643,
|
| 180 |
+
"rms_norm_eps": 1e-06,
|
| 181 |
+
"rope_parameters": {
|
| 182 |
+
"rope_theta": 1000000,
|
| 183 |
+
"rope_type": "default"
|
| 184 |
+
},
|
| 185 |
+
"sliding_window": null,
|
| 186 |
+
"tie_word_embeddings": true,
|
| 187 |
+
"use_cache": true,
|
| 188 |
+
"use_sliding_window": false,
|
| 189 |
+
"vocab_size": 151670
|
| 190 |
+
},
|
| 191 |
+
"text_model_id": "Qwen/Qwen3-0.6B",
|
| 192 |
+
"transformers_version": "5.0.0.dev0",
|
| 193 |
+
"use_cache": false,
|
| 194 |
+
"use_specaugment": true,
|
| 195 |
+
"user_prompt": "Please transcribe this English audio into text: <audio>",
|
| 196 |
+
"vocab_size": 151670
|
| 197 |
+
}
|
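The auto_map and custom_pipelines entries are what make the remote-code classes above discoverable, so loading the checkpoint looks roughly like this (repo id is a placeholder):

from transformers import AutoConfig, AutoModel, AutoProcessor

repo = "your-org/your-asr-model"  # placeholder
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)        # -> ASRConfig
model = AutoModel.from_pretrained(repo, trust_remote_code=True)          # -> ASRModel
processor = AutoProcessor.from_pretrained(repo, trust_remote_code=True)  # -> ASRProcessor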
generation_config.json
ADDED
|
@@ -0,0 +1,15 @@
|
| 1 |
+
{
|
| 2 |
+
"bos_token_id": 151643,
|
| 3 |
+
"eos_token_id": [
|
| 4 |
+
151645,
|
| 5 |
+
151643
|
| 6 |
+
],
|
| 7 |
+
"length_penalty": 1.0,
|
| 8 |
+
"max_new_tokens": 256,
|
| 9 |
+
"min_new_tokens": 0,
|
| 10 |
+
"no_repeat_ngram_size": 0,
|
| 11 |
+
"num_beams": 1,
|
| 12 |
+
"pad_token_id": 151643,
|
| 13 |
+
"repetition_penalty": 1.0,
|
| 14 |
+
"transformers_version": "5.0.0.dev0"
|
| 15 |
+
}
|
model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dbefaa78e5ba08c65ec808f9c83f50154b45d0a9edbf19841e172c70af59c158
|
| 3 |
+
size 25172384
|
preprocessor_config.json
ADDED
|
@@ -0,0 +1,18 @@
|
| 1 |
+
{
|
| 2 |
+
"chunk_length": 30,
|
| 3 |
+
"dither": 0.0,
|
| 4 |
+
"feature_extractor_type": "WhisperFeatureExtractor",
|
| 5 |
+
"feature_size": 80,
|
| 6 |
+
"hop_length": 160,
|
| 7 |
+
"n_fft": 400,
|
| 8 |
+
"n_samples": 480000,
|
| 9 |
+
"nb_max_frames": 3000,
|
| 10 |
+
"padding_side": "right",
|
| 11 |
+
"padding_value": 0.0,
|
| 12 |
+
"return_attention_mask": false,
|
| 13 |
+
"sampling_rate": 16000,
|
| 14 |
+
"processor_class": "ASRProcessor",
|
| 15 |
+
"auto_map": {
|
| 16 |
+
"AutoProcessor": "asr_processing.ASRProcessor"
|
| 17 |
+
}
|
| 18 |
+
}
|
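These extractor settings are mutually consistent: 30 s of 16 kHz audio is 480 000 samples, and a 160-sample hop yields 3000 mel frames, matching nb_max_frames and the 3000-frame input the encoder conv stack later halves to 1500.

chunk_length, sampling_rate, hop_length = 30, 16000, 160
n_samples = chunk_length * sampling_rate   # 480000
nb_max_frames = n_samples // hop_length    # 3000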
projectors.py
ADDED
|
@@ -0,0 +1,450 @@
|
| 1 |
+
"""Audio projector modules for bridging encoder and decoder embeddings.
|
| 2 |
+
|
| 3 |
+
This module contains all projector architectures:
|
| 4 |
+
- MLPAudioProjector: Simple 2-layer MLP with frame stacking downsampling
|
| 5 |
+
- MOSAProjector: MOSA-style dense mixture of experts
|
| 6 |
+
- SharedMoEAudioProjector: Shared expert + sparse routed experts
|
| 7 |
+
- QFormerAudioProjector: BLIP-2 QFormer with learnable queries (Granite-style)
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import math
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
import torch.nn as nn
|
| 14 |
+
import torch.nn.functional as F # noqa: N812
|
| 15 |
+
from transformers import AutoModel, Blip2QFormerConfig
|
| 16 |
+
from transformers.models.llama.modeling_llama import LlamaRMSNorm
|
| 17 |
+
|
| 18 |
+
# =============================================================================
|
| 19 |
+
# MLP Projector
|
| 20 |
+
# =============================================================================
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class MLPAudioProjector(nn.Module):
|
| 24 |
+
"""2-layer MLP projector with frame-stacking downsampling (matches GLM-ASR)."""
|
| 25 |
+
|
| 26 |
+
def __init__(self, config):
|
| 27 |
+
super().__init__()
|
| 28 |
+
|
| 29 |
+
encoder_dim = getattr(config, "encoder_dim", 768)
|
| 30 |
+
llm_dim = getattr(config, "llm_dim", 2048)
|
| 31 |
+
self.k = getattr(config, "projector_pool_stride", 2)
|
| 32 |
+
|
| 33 |
+
# Frame stacking: concat k adjacent frames then project
|
| 34 |
+
# Matches GLM-ASR: in_dim -> 2*llm_dim -> llm_dim
|
| 35 |
+
in_dim = encoder_dim * self.k
|
| 36 |
+
hidden_dim = llm_dim * 2
|
| 37 |
+
self.linear_1 = nn.Linear(in_dim, hidden_dim)
|
| 38 |
+
self.act = nn.GELU()
|
| 39 |
+
self.linear_2 = nn.Linear(hidden_dim, llm_dim)
|
| 40 |
+
|
| 41 |
+
def get_output_length(self, input_length: int) -> int:
|
| 42 |
+
"""Calculate output sequence length given input length."""
|
| 43 |
+
return input_length // self.k
|
| 44 |
+
|
| 45 |
+
def forward(self, x):
|
| 46 |
+
"""
|
| 47 |
+
x: [Batch, Seq_Len, Dim]
|
| 48 |
+
Returns: [Batch, Seq_Len // k, llm_dim]
|
| 49 |
+
"""
|
| 50 |
+
batch, seq, dim = x.shape
|
| 51 |
+
# Reshape to combine k frames: [B, S, D] -> [B, -1, D*k]
|
| 52 |
+
# -1 infers sequence length, implicitly downsampling by factor k
|
| 53 |
+
x = x.reshape(batch, -1, dim * self.k)
|
| 54 |
+
|
| 55 |
+
x = self.linear_1(x)
|
| 56 |
+
x = self.act(x)
|
| 57 |
+
return self.linear_2(x)
|
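A quick shape check of the frame-stacking step above, using the dimensions this checkpoint is configured with (encoder_dim 1280, llm_dim 1024, projector_pool_stride 4); only the reshape is executed, the linear sizes are noted in comments:

import torch

x = torch.randn(2, 1500, 1280)      # [batch, encoder frames, encoder_dim]
x = x.reshape(2, -1, 1280 * 4)      # [2, 375, 5120]: four frames stacked per step
# linear_1: 5120 -> 2048 (= 2 * llm_dim), GELU, linear_2: 2048 -> 1024
# result: 375 audio embeddings of size llm_dim per 30 s clip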
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# =============================================================================
|
| 61 |
+
# MoE Projector (MOSA-style)
|
| 62 |
+
# =============================================================================
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class SimpleAdapter(nn.Module):
|
| 66 |
+
"""Simple 2-layer GELU adapter (from MOSA paper)."""
|
| 67 |
+
|
| 68 |
+
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int):
|
| 69 |
+
super().__init__()
|
| 70 |
+
self.fc1 = nn.Linear(input_dim, hidden_dim)
|
| 71 |
+
self.act = nn.GELU()
|
| 72 |
+
self.fc2 = nn.Linear(hidden_dim, output_dim)
|
| 73 |
+
|
| 74 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 75 |
+
return self.fc2(self.act(self.fc1(x)))
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class SwiGLUExpert(nn.Module):
|
| 79 |
+
"""SwiGLU expert (gated MLP with SiLU activation)."""
|
| 80 |
+
|
| 81 |
+
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int):
|
| 82 |
+
super().__init__()
|
| 83 |
+
self.gate_proj = nn.Linear(input_dim, hidden_dim, bias=False)
|
| 84 |
+
self.up_proj = nn.Linear(input_dim, hidden_dim, bias=False)
|
| 85 |
+
self.down_proj = nn.Linear(hidden_dim, output_dim, bias=False)
|
| 86 |
+
|
| 87 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 88 |
+
return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x))
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class MOSAProjector(nn.Module):
|
| 92 |
+
"""MOSA-Base projector: simple 2-layer ReLU router with 4 simple adapters.
|
| 93 |
+
|
| 94 |
+
Based on "MOSA: Mixtures of Simple Adapters" (arXiv:2508.18998).
|
| 95 |
+
Uses softmax gating over all experts (dense MoE) with only cross-entropy loss.
|
| 96 |
+
Uses frame-stacking for downsampling (like MLP projector).
|
| 97 |
+
"""
|
| 98 |
+
|
| 99 |
+
def __init__(self, config):
|
| 100 |
+
super().__init__()
|
| 101 |
+
self.encoder_dim = getattr(config, "encoder_dim", None) or 1280
|
| 102 |
+
self.llm_dim = getattr(config, "llm_dim", None) or 2048
|
| 103 |
+
self.k = getattr(config, "projector_pool_stride", 4)
|
| 104 |
+
self.num_experts = getattr(config, "num_experts", None) or 4 # MOSA-Base uses 4
|
| 105 |
+
adapter_hidden = getattr(config, "adapter_hidden_dim", None) or 4096
|
| 106 |
+
|
| 107 |
+
# Frame stacking: concat k adjacent frames then project
|
| 108 |
+
in_dim = self.encoder_dim * self.k
|
| 109 |
+
|
| 110 |
+
# --- 1. Simple Router (MOSA-Base: 2 layers with ReLU) ---
|
| 111 |
+
# Maps encoder_dim -> 512 -> num_experts
|
| 112 |
+
router_hidden = getattr(config, "router_hidden_dim", None) or 512
|
| 113 |
+
self.router = nn.Sequential(
|
| 114 |
+
nn.Linear(self.encoder_dim, router_hidden),
|
| 115 |
+
nn.ReLU(),
|
| 116 |
+
nn.Linear(router_hidden, self.num_experts),
|
| 117 |
+
)
|
| 118 |
+
|
| 119 |
+
# --- 2. Experts (Simple 2-layer GELU adapters) ---
|
| 120 |
+
# Each expert: in_dim (stacked frames) -> hidden -> llm_dim
|
| 121 |
+
self.experts = nn.ModuleList(
|
| 122 |
+
[SimpleAdapter(in_dim, adapter_hidden, self.llm_dim) for _ in range(self.num_experts)]
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
def forward(self, x):
|
| 126 |
+
# x: (B, S, encoder_dim)
|
| 127 |
+
batch_size, seq_len, dim = x.shape
|
| 128 |
+
|
| 129 |
+
# --- 1. Router Branch ---
|
| 130 |
+
# Mean pool encoder outputs for routing decisions
|
| 131 |
+
x_pooled = x.reshape(batch_size, -1, self.k, self.encoder_dim).mean(dim=2) # (B, S//k, D)
|
| 132 |
+
|
| 133 |
+
# Router logits and softmax gating (dense MoE)
|
| 134 |
+
routing_weights = F.softmax(self.router(x_pooled), dim=-1) # (B, S//k, num_experts)
|
| 135 |
+
|
| 136 |
+
# --- 2. Frame stacking for experts ---
|
| 137 |
+
# Reshape to combine k frames: [B, S, D] -> [B, S//k, D*k]
|
| 138 |
+
x_stacked = x.reshape(batch_size, -1, dim * self.k)
|
| 139 |
+
|
| 140 |
+
# --- 3. Expert Mixture (Dense Execution) ---
|
| 141 |
+
# Run all experts and compute weighted sum
|
| 142 |
+
expert_outputs = torch.stack(
|
| 143 |
+
[expert(x_stacked) for expert in self.experts]
|
| 144 |
+
) # (E, B, S//k, D)
|
| 145 |
+
return torch.einsum("ebsd, bse -> bsd", expert_outputs, routing_weights)
|
| 146 |
+
|
| 147 |
+
def get_output_length(self, input_length: int) -> int:
|
| 148 |
+
"""Calculate output sequence length given input length."""
|
| 149 |
+
return input_length // self.k
|
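The einsum at the end of MOSAProjector.forward is a per-position convex combination of the expert outputs; spelled out as a loop it is equivalent to the following (random shapes for illustration):

import torch

E, B, S, D = 4, 2, 10, 1024
expert_outputs = torch.randn(E, B, S, D)
routing_weights = torch.softmax(torch.randn(B, S, E), dim=-1)

mixed = torch.zeros(B, S, D)
for e in range(E):
    mixed += routing_weights[..., e].unsqueeze(-1) * expert_outputs[e]

reference = torch.einsum("ebsd, bse -> bsd", expert_outputs, routing_weights)
assert torch.allclose(mixed, reference, atol=1e-5)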
| 150 |
+
|
| 151 |
+
|
| 152 |
+
# =============================================================================
|
| 153 |
+
# MoE Projector (Shared Expert + Sparse Routed Experts)
|
| 154 |
+
# =============================================================================
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class SharedMoEBlock(nn.Module):
|
| 158 |
+
"""MoE block with Shared + Sigmoid-Routed Experts."""
|
| 159 |
+
|
| 160 |
+
def __init__(
|
| 161 |
+
self,
|
| 162 |
+
input_dim: int,
|
| 163 |
+
hidden_dim: int,
|
| 164 |
+
output_dim: int,
|
| 165 |
+
num_experts: int = 4,
|
| 166 |
+
top_k: int = 2,
|
| 167 |
+
):
|
| 168 |
+
super().__init__()
|
| 169 |
+
self.num_experts = num_experts
|
| 170 |
+
self.top_k = top_k
|
| 171 |
+
self.output_dim = output_dim
|
| 172 |
+
|
| 173 |
+
# RMSNorm before routing
|
| 174 |
+
self.norm = LlamaRMSNorm(input_dim, eps=1e-8)
|
| 175 |
+
|
| 176 |
+
self.router = nn.Linear(input_dim, num_experts, bias=False)
|
| 177 |
+
nn.init.normal_(self.router.weight, mean=0.0, std=0.02)
|
| 178 |
+
|
| 179 |
+
self.shared_expert = SimpleAdapter(input_dim, hidden_dim, output_dim)
|
| 180 |
+
self.experts = nn.ModuleList(
|
| 181 |
+
[SimpleAdapter(input_dim, hidden_dim, output_dim) for _ in range(num_experts)]
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
self.last_router_logits = None
|
| 185 |
+
self.last_router_probs = None
|
| 186 |
+
|
| 187 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 188 |
+
batch_size, seq_len, dim = hidden_states.shape
|
| 189 |
+
|
| 190 |
+
# 1. Apply Shared Expert
|
| 191 |
+
normed_states = self.norm(hidden_states)
|
| 192 |
+
shared_out = self.shared_expert(normed_states)
|
| 193 |
+
|
| 194 |
+
# 2. Router Logic (Sigmoid Style)
|
| 195 |
+
flat_hidden = normed_states.view(-1, dim)
|
| 196 |
+
router_logits = self.router(flat_hidden)
|
| 197 |
+
|
| 198 |
+
# Sigmoid routing
|
| 199 |
+
router_probs = torch.sigmoid(router_logits)
|
| 200 |
+
|
| 201 |
+
self.last_router_logits = router_logits
|
| 202 |
+
self.last_router_probs = router_probs
|
| 203 |
+
|
| 204 |
+
# 3. Top-K Selection
|
| 205 |
+
top_k_scores, top_k_indices = torch.topk(router_probs, self.top_k, dim=-1)
|
| 206 |
+
|
| 207 |
+
# Normalize weights
|
| 208 |
+
top_k_weights = top_k_scores / (top_k_scores.sum(dim=-1, keepdim=True) + 1e-6)
|
| 209 |
+
top_k_weights = top_k_weights.to(hidden_states.dtype)
|
| 210 |
+
|
| 211 |
+
# 4. Dispatch
|
| 212 |
+
routed_out = self._dispatch_experts(flat_hidden, top_k_indices, top_k_weights)
|
| 213 |
+
routed_out = routed_out.view(batch_size, seq_len, -1)
|
| 214 |
+
|
| 215 |
+
return shared_out + routed_out
|
| 216 |
+
|
| 217 |
+
def _dispatch_experts(
|
| 218 |
+
self,
|
| 219 |
+
hidden_states: torch.Tensor,
|
| 220 |
+
top_k_indices: torch.Tensor,
|
| 221 |
+
top_k_weights: torch.Tensor,
|
| 222 |
+
) -> torch.Tensor:
|
| 223 |
+
num_tokens = hidden_states.shape[0]
|
| 224 |
+
output = torch.zeros(
|
| 225 |
+
num_tokens, self.output_dim, device=hidden_states.device, dtype=hidden_states.dtype
|
| 226 |
+
)
|
| 227 |
+
|
| 228 |
+
for expert_idx, expert in enumerate(self.experts):
|
| 229 |
+
expert_mask = top_k_indices == expert_idx
|
| 230 |
+
if not expert_mask.any():
|
| 231 |
+
continue
|
| 232 |
+
|
| 233 |
+
token_indices, slot_indices = torch.where(expert_mask)
|
| 234 |
+
expert_input = hidden_states[token_indices]
|
| 235 |
+
expert_output = expert(expert_input).to(output.dtype)
|
| 236 |
+
weights = top_k_weights[token_indices, slot_indices].unsqueeze(-1)
|
| 237 |
+
output.index_add_(0, token_indices, expert_output * weights)
|
| 238 |
+
|
| 239 |
+
return output
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def load_balancing_loss(router_probs: torch.Tensor, num_experts: int, top_k: int) -> torch.Tensor:
|
| 243 |
+
"""Auxiliary loss to encourage balanced expert usage."""
|
| 244 |
+
prob_per_expert = router_probs.mean(dim=0)
|
| 245 |
+
target_mean = prob_per_expert.mean()
|
| 246 |
+
return (prob_per_expert - target_mean).square().sum() * num_experts
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def z_loss(router_logits: torch.Tensor) -> torch.Tensor:
|
| 250 |
+
"""Z-loss to prevent router logits from growing too large."""
|
| 251 |
+
return torch.logsumexp(router_logits.float(), dim=-1).square().mean()
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
class MoEAudioProjector(nn.Module):
|
| 255 |
+
"""MoE projector with shared expert + sparse routed experts."""
|
| 256 |
+
|
| 257 |
+
def __init__(self, config):
|
| 258 |
+
super().__init__()
|
| 259 |
+
|
| 260 |
+
self.k = getattr(config, "projector_pool_stride", 4)
|
| 261 |
+
encoder_dim = config.encoder_dim
|
| 262 |
+
|
| 263 |
+
# Depthwise Conv for temporal mixing
|
| 264 |
+
self.temporal_conv = nn.Conv1d(
|
| 265 |
+
encoder_dim, encoder_dim, kernel_size=3, padding=1, groups=encoder_dim
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
in_dim = encoder_dim * self.k
|
| 269 |
+
out_dim = config.llm_dim
|
| 270 |
+
hidden_dim = getattr(config, "projector_hidden_dim", None) or in_dim
|
| 271 |
+
|
| 272 |
+
self.num_experts = getattr(config, "num_experts", 4)
|
| 273 |
+
self.top_k = getattr(config, "num_experts_per_tok", 2)
|
| 274 |
+
self.aux_loss_coef = getattr(config, "router_aux_loss_coef", 0.02)
|
| 275 |
+
self.z_loss_coef = getattr(config, "router_z_loss_coef", 0.001)
|
| 276 |
+
|
| 277 |
+
self.moe = SharedMoEBlock(in_dim, hidden_dim, out_dim, self.num_experts, self.top_k)
|
| 278 |
+
self._init_weights()
|
| 279 |
+
|
| 280 |
+
def _init_weights(self):
|
| 281 |
+
with torch.no_grad():
|
| 282 |
+
nn.init.orthogonal_(self.moe.shared_expert.fc1.weight)
|
| 283 |
+
nn.init.orthogonal_(self.moe.shared_expert.fc2.weight, gain=0.5)
|
| 284 |
+
|
| 285 |
+
for expert in self.moe.experts:
|
| 286 |
+
nn.init.orthogonal_(expert.fc1.weight)
|
| 287 |
+
nn.init.orthogonal_(expert.fc2.weight, gain=0.01)
|
| 288 |
+
|
| 289 |
+
def get_output_length(self, input_length: int) -> int:
|
| 290 |
+
"""Calculate output sequence length given input length."""
|
| 291 |
+
# Temporal pooling with stride k
|
| 292 |
+
if input_length % self.k:
|
| 293 |
+
input_length += self.k - input_length % self.k
|
| 294 |
+
return input_length // self.k
|
| 295 |
+
|
| 296 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 297 |
+
batch_size, seq_len, dim = x.size()
|
| 298 |
+
|
| 299 |
+
target_dtype = self.moe.shared_expert.fc1.weight.dtype
|
| 300 |
+
if x.dtype != target_dtype:
|
| 301 |
+
x = x.to(target_dtype)
|
| 302 |
+
|
| 303 |
+
# Temporal Context Injection
|
| 304 |
+
x_ctx = x.transpose(1, 2)
|
| 305 |
+
x_ctx = self.temporal_conv(x_ctx)
|
| 306 |
+
x = x + x_ctx.transpose(1, 2)
|
| 307 |
+
|
| 308 |
+
if seq_len % self.k:
|
| 309 |
+
x = F.pad(x, (0, 0, 0, self.k - seq_len % self.k))
|
| 310 |
+
|
| 311 |
+
x = x.view(batch_size, -1, dim * self.k)
|
| 312 |
+
|
| 313 |
+
return self.moe(x)
|
| 314 |
+
|
| 315 |
+
def get_aux_loss(self) -> torch.Tensor:
|
| 316 |
+
if self.moe.last_router_logits is None:
|
| 317 |
+
return torch.tensor(0.0, device=self.moe.router.weight.device)
|
| 318 |
+
|
| 319 |
+
balance = load_balancing_loss(self.moe.last_router_probs, self.num_experts, self.top_k)
|
| 320 |
+
z = z_loss(self.moe.last_router_logits)
|
| 321 |
+
|
| 322 |
+
return self.aux_loss_coef * balance + self.z_loss_coef * z
|
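During training the auxiliary term is meant to be added on top of the main cross-entropy loss after the projector forward pass has populated the router statistics; a hedged sketch where projector, ce_loss and optimizer are placeholders:

audio_embeds = projector(encoder_hidden_states)  # fills moe.last_router_logits / probs
loss = ce_loss + projector.get_aux_loss()        # balance loss + z-loss regularizers
loss.backward()
optimizer.step()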
| 323 |
+
|
| 324 |
+
|
| 325 |
+
# =============================================================================
# QFormer Projector (Granite-style)
# =============================================================================


class QFormerAudioProjector(nn.Module):
    """
    BLIP-2 QFormer projector with learnable queries.

    Based on GraniteSpeechEncoderProjector - uses a QFormer model with learnable
    query embeddings to compress and project audio encoder outputs. The audio
    sequence is processed in windows and downsampled via cross-attention.
    """

    def __init__(self, config):
        super().__init__()

        encoder_dim = config.encoder_dim
        llm_dim = config.llm_dim

        # Window and downsampling parameters (Granite defaults: window=15, downsample=5)
        self.window_size = getattr(config, "qformer_window_size", 15)
        self.downsample_rate = getattr(config, "downsample_rate", 5)
        self.num_queries = self.window_size // self.downsample_rate

        # QFormer hidden size (matches encoder for cross-attention)
        qformer_hidden = getattr(config, "qformer_hidden_size", None) or encoder_dim
        qformer_num_layers = getattr(config, "qformer_num_layers", 2)
        qformer_num_heads = getattr(config, "qformer_num_heads", 16)
        qformer_intermediate = getattr(config, "qformer_intermediate_size", None) or (
            qformer_hidden * 4
        )

        # Learnable query embeddings (Granite uses std=1.0)
        self.query = nn.Parameter(torch.zeros(1, self.num_queries, qformer_hidden))
        self.query.data.normal_(mean=0.0, std=1.0)

        # Optional projection if encoder dim != qformer hidden
        if encoder_dim != qformer_hidden:
            self.encoder_proj = nn.Linear(encoder_dim, qformer_hidden, bias=False)
        else:
            self.encoder_proj = None

        # Configure QFormer to match Granite's exact config
        qformer_config = Blip2QFormerConfig(
            hidden_size=qformer_hidden,
            num_hidden_layers=qformer_num_layers,
            num_attention_heads=qformer_num_heads,
            intermediate_size=qformer_intermediate,
            encoder_hidden_size=qformer_hidden,
            cross_attention_frequency=1,
            # Granite-specific settings
            hidden_act="gelu",
            attention_probs_dropout_prob=0.1,
            hidden_dropout_prob=0.1,
            layer_norm_eps=1e-12,
            initializer_range=0.02,
        )
        self.qformer = AutoModel.from_config(qformer_config)

        # Final projection to LLM dimension (Granite uses bias=True)
        self.linear = nn.Linear(qformer_hidden, llm_dim)

    def get_output_length(self, input_length: int) -> int:
        """Calculate output sequence length given input length."""
        # QFormer uses window-based processing with num_queries per window
        nblocks = math.ceil(input_length / self.window_size)
        return nblocks * self.num_queries

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """
        Args:
            hidden_states: [batch_size, seq_len, encoder_dim]

        Returns:
            projected: [batch_size, num_output_tokens, llm_dim]
        """
        batch_size, seq_len, dim = hidden_states.size()

        # Ensure float dtype for QFormer
        target_dtype = self.query.dtype
        if hidden_states.dtype != target_dtype:
            hidden_states = hidden_states.to(target_dtype)

        # Optional encoder projection
        if self.encoder_proj is not None:
            hidden_states = self.encoder_proj(hidden_states)

        # Compute number of windows and pad to fit
        nblocks = math.ceil(seq_len / self.window_size)
        pad = nblocks * self.window_size - seq_len
        if pad > 0:
            hidden_states = F.pad(hidden_states, (0, 0, 0, pad), "constant", 0)

        # Reshape to process each window: [batch*nblocks, window_size, dim]
        effective_batch = batch_size * nblocks
        hidden_states = hidden_states.view(effective_batch, self.window_size, -1)

        # Expand queries to match batch size
        query_embeds = self.query.expand(effective_batch, -1, -1)

        # QFormer cross-attention
        query_output = self.qformer(
            query_embeds=query_embeds,
            encoder_hidden_states=hidden_states,
            return_dict=True,
        )

        # Reshape back: [batch, nblocks * num_queries, hidden]
        output_tokens = nblocks * self.num_queries
        query_proj = query_output.last_hidden_state.view(batch_size, output_tokens, -1)

        # Project to LLM dimension
        return self.linear(query_proj)


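# Worked example of the windowed compression above, using the Granite defaults
# set in __init__ (window_size=15, downsample_rate=5 -> num_queries=3); the
# `config` below is a hypothetical config object:
#
#     proj = QFormerAudioProjector(config)
#     proj.get_output_length(1500)   # ceil(1500 / 15) * 3 == 300
#
# i.e. every 15-frame window is compressed to 3 query tokens, a 5x reduction
# consistent with downsample_rate.
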
# =============================================================================
# Projector Registry
# =============================================================================

PROJECTOR_CLASSES = {
    "mlp": MLPAudioProjector,
    "mosa": MOSAProjector,
    "moe": MoEAudioProjector,
    "qformer": QFormerAudioProjector,
}
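
# Usage sketch (illustrative; nothing in this file calls the registry itself):
# config.projector_type selects one of the keys above, so a factory can be as
# simple as the following, with names other than PROJECTOR_CLASSES assumed:
#
#     projector_cls = PROJECTOR_CLASSES[config.projector_type]   # e.g. "qformer"
#     projector = projector_cls(config)
#     audio_embeds = projector(encoder_hidden_states)            # [B, T', llm_dim]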
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:33b674fb8444e2553eae8f1b261093371920a28ef75b5c18f4deb3f9217ed0ba
size 11422834
tokenizer_config.json
ADDED
@@ -0,0 +1,17 @@
{
  "add_prefix_space": false,
  "backend": "tokenizers",
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": [
    "<audio>"
  ],
  "is_local": false,
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
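A quick sanity check of the tokenizer settings above (a minimal sketch; the local path is a placeholder, and whether "<audio>" maps to a dedicated id depends on the vocabulary stored in tokenizer.json):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./path/to/this/checkpoint")  # placeholder path
print(tok.eos_token)                          # expected "<|im_end|>" per this config
print(tok.pad_token)                          # expected "<|endoftext|>"
print(tok.convert_tokens_to_ids("<audio>"))   # id of the audio placeholder token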
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9a17d64397e24390e9613fe100197f47f3d9978e5d2a100bf14aa35a33fffc55
size 5201