speechisalluneed committed (verified)
Commit 46475d4 · Parent(s): 8536cb7

Upload folder using huggingface_hub

__init__.py ADDED
@@ -0,0 +1,7 @@
+ from .configuration_fastslm import FastSLMConfig
+ from .modeling_fastslm import FastSLMForConditionalGeneration # FastALMForCausalLM
+
+ from transformers import AutoConfig, AutoModelForCausalLM
+
+ AutoConfig.register("fastslm", FastSLMConfig)
+ AutoModelForCausalLM.register(FastSLMConfig, FastSLMForConditionalGeneration)
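
Because `__init__.py` registers the custom classes with the Auto factories, the model can be constructed through the standard Auto API once this package is importable (or, when loading from the Hub, once `trust_remote_code=True` is passed and the config's `auto_map` points at these files). A minimal sketch, assuming a placeholder repo id:

# Hypothetical usage; the repo id below is a placeholder.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("speechisalluneed/FastSLM", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("speechisalluneed/FastSLM", trust_remote_code=True)
print(type(model).__name__)   # expected: FastSLMForConditionalGeneration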
__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/__pycache__/__init__.cpython-310.pyc and b/__pycache__/__init__.cpython-310.pyc differ
 
__pycache__/configuration_fastslm.cpython-310.pyc ADDED
Binary file (1.69 kB)
 
__pycache__/modeling_fastslm.cpython-310.pyc ADDED
Binary file (15.8 kB)
 
__pycache__/modeling_whisper.cpython-310.pyc CHANGED
Binary files a/__pycache__/modeling_whisper.cpython-310.pyc and b/__pycache__/modeling_whisper.cpython-310.pyc differ
 
config.json CHANGED
@@ -1,10 +1,10 @@
 {
   "architectures": [
-    "FastALMForConditionalGeneration"
+    "FastSLMForConditionalGeneration"
   ],
   "encoder_config": {
     "compression_size": 50,
-    "model_type": "fastalm_speech_encoder",
+    "model_type": "fastslm_speech_encoder",
     "n_ctx": 1500,
     "n_head": 20,
     "n_layer": 32,
@@ -58,7 +58,7 @@
   "lora_a": 64,
   "lora_r": 16,
   "low_resource": false,
-  "model_type": "fastalm",
+  "model_type": "fastslm",
   "torch_dtype": "float32",
   "transformers_version": "4.51.3"
 }
configuration_fastslm.py ADDED
@@ -0,0 +1,63 @@
+ from transformers import PretrainedConfig, AutoConfig
+
+ class FastSLMSpeechEncoderConfig(PretrainedConfig):
+     model_type = "fastslm_speech_encoder"
+     def __init__(
+         self,
+         n_mels=128,
+         n_ctx=1500,
+         n_state=1280,
+         n_head=20,
+         n_layer=32,
+         stage_tokens=[80, 80, 80],
+         compression_size=50,
+         **kwargs
+     ):
+         super().__init__(**kwargs)
+         self.n_mels = n_mels
+         self.n_ctx = n_ctx
+         self.n_state = n_state
+         self.n_head = n_head
+         self.n_layer = n_layer
+         self.stage_tokens = stage_tokens
+         self.compression_size = compression_size
+
+ class FastSLMConfig(PretrainedConfig):
+     model_type = "fastslm"
+     def __init__(
+         self,
+         encoder_config=None,
+         llm_config=None,
+         lora_r=16,
+         lora_a=64,
+         llm_modules=None,
+         low_resource=False,
+         **kwargs
+     ):
+         # Default llm_modules
+         if llm_modules is None:
+             llm_modules = ["q_proj","k_proj","v_proj","o_proj","gate_proj","up_proj","down_proj"]
+
+         # Handle the LLM config: convert a plain dict into an AutoConfig instance
+         if llm_config is None:
+             llm_config = AutoConfig.from_pretrained("Qwen/Qwen3-4B")
+         elif isinstance(llm_config, dict):
+             if "_name_or_path" in llm_config:
+                 llm_config = AutoConfig.from_pretrained(llm_config["_name_or_path"], **llm_config)
+             else:
+                 llm_config = AutoConfig.for_model(**llm_config)  # AutoConfig has no from_dict; build from model_type
+
+         # Handle the encoder config
+         if encoder_config is None:
+             encoder_config = FastSLMSpeechEncoderConfig()
+         elif isinstance(encoder_config, dict):
+             encoder_config = FastSLMSpeechEncoderConfig(**encoder_config)
+
+         self.llm_config = llm_config
+         self.encoder_config = encoder_config
+         self.lora_r = lora_r
+         self.lora_a = lora_a
+         self.llm_modules = llm_modules
+         self.low_resource = low_resource
+
+         super().__init__(**kwargs)
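
For reference, a minimal construction sketch for FastSLMConfig with hypothetical values; AutoConfig.for_model("qwen3") is used only to avoid the network call that the Qwen/Qwen3-4B default would trigger, and assumes a transformers build that knows the qwen3 model type:

# Hypothetical sketch: build a FastSLMConfig offline with an explicit LLM sub-config.
from transformers import AutoConfig

llm_cfg = AutoConfig.for_model("qwen3")   # default-sized Qwen3 config, no download
enc_cfg = {"n_mels": 128, "n_ctx": 1500, "n_state": 1280, "n_head": 20, "n_layer": 32}

cfg = FastSLMConfig(encoder_config=enc_cfg, llm_config=llm_cfg, lora_r=16, lora_a=64)
print(cfg.model_type, cfg.encoder_config.model_type, cfg.lora_r)   # fastslm fastslm_speech_encoder 16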
generation_config.json CHANGED
@@ -1,4 +1,9 @@
 {
-  "_from_model_config": true,
+  "do_sample": true,
+  "eos_token_id": 151645,
+  "pad_token_id": 151643,
+  "temperature": 0.5,
+  "top_k": 20,
+  "top_p": 0.95,
   "transformers_version": "4.51.3"
 }
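
These sampling defaults (temperature 0.5, top_k 20, top_p 0.95, plus the Qwen eos/pad ids) are read into model.generation_config when the model is loaded with from_pretrained; since the custom generate() in modeling_fastslm.py below simply forwards **kwargs to the wrapped LLM, passing values explicitly per call is the unambiguous way to apply or override them. A hedged sketch, assuming a loaded model, prepared input_ids, and a preprocessed waveform wav:

# Hypothetical call; explicit kwargs override the generation_config.json defaults.
out_ids = model.generate(
    input_ids,
    audio=[wav],            # FastSLM-specific audio argument (see modeling_fastslm.py)
    max_new_tokens=256,
    do_sample=True,
    temperature=0.2,        # overrides the 0.5 default
    top_p=0.95,
)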
llm/adapter_config.json CHANGED
@@ -23,10 +23,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "gate_proj",
-    "q_proj",
-    "k_proj",
     "v_proj",
+    "k_proj",
+    "q_proj",
+    "gate_proj",
     "o_proj",
     "up_proj",
     "down_proj"
modeling_fastslm.py ADDED
@@ -0,0 +1,453 @@
+ # FastSLM/modeling_fastslm.py
+
+ import torch
+ import torch.nn as nn
+ import torchaudio
+ import torch.nn.functional as F
+ import numpy as np
+ from torch import Tensor
+ import whisper
+ from einops import rearrange
+ from typing import Optional, List
+
+ from peft import (
+     LoraConfig,
+     get_peft_model
+ )
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     PreTrainedModel,
+     GenerationMixin,
+     AutoConfig
+ )
+ from .modeling_whisper import AudioEncoder
+ from .configuration_fastslm import FastSLMConfig
+ # Check for scaled_dot_product_attention availability
+ try:
+     from torch.nn.functional import scaled_dot_product_attention
+     SDPA_AVAILABLE = True
+ except (ImportError, RuntimeError, OSError):
+     scaled_dot_product_attention = None
+     SDPA_AVAILABLE = False
+
+ LANGUAGES = {
+     "en": "english",
+     "ko": "korean"
+ }
+
+ def set_trainable_parameters(module, requires_grad=False):
+     for param in module.parameters():
+         param.requires_grad = requires_grad
+     module._requires_grad = requires_grad
+
+ # --- Helper Modules (Compressor, MHSA, Attention, Downsampler) ---
+
+ class Compressor(nn.Module):
+     def __init__(self, embed_dim, num_heads, num_query, n_ctx):
+         super().__init__()
+         self.num_heads = num_heads
+         self.head_dims = embed_dim // num_heads
+         self.n_ctx = n_ctx
+
+         self.query = nn.Parameter(torch.randn(1, num_query, embed_dim))
+         nn.init.normal_(self.query, mean=0.0, std=0.02)
+
+         self.q_ln = nn.LayerNorm(embed_dim, eps=1e-5)
+         self.kv_ln = nn.LayerNorm(embed_dim, eps=1e-5)
+
+         self.kv_proj = nn.Identity()
+         self.out_proj = nn.Linear(embed_dim, embed_dim)
+
+         self.register_buffer("q_pos_embeds", self.sinusoids(num_query, embed_dim))
+         self.register_buffer("kv_pos_embeds", self.sinusoids(n_ctx, embed_dim))
+
+         self.init_weights()
+
+     def init_weights(self):
+         nn.init.constant_(self.q_ln.bias, 0)
+         nn.init.constant_(self.q_ln.weight, 1.0)
+         nn.init.constant_(self.kv_ln.bias, 0)
+         nn.init.constant_(self.kv_ln.weight, 1.0)
+
+     def sinusoids(self, length, channels, max_timescale=10000):
+         assert channels % 2 == 0
+         log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
+         inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
+         scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
+         return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
+
+     def forward(self, x: Tensor):
+         q = self.q_ln(self.query.to(x.device))
+         x = self.kv_ln(self.kv_proj(x))
+
+         q = rearrange(q + self.q_pos_embeds, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
+         k = rearrange(x + self.kv_pos_embeds, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
+         v = rearrange(x, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
+
+         attn = scaled_dot_product_attention(q, k, v)
+         attn = rearrange(attn, 'b h l d -> b l (h d)')
+         x = self.out_proj(attn)
+         return x
+
+ class MHSA(nn.Module):
+     def __init__(self, embed_dim, num_heads):
+         super().__init__()
+         self.embed_dim = embed_dim
+         self.num_heads = num_heads
+         self.head_dims = embed_dim // num_heads
+         self.q = nn.Linear(embed_dim, embed_dim, bias=True)
+         self.k = nn.Linear(embed_dim, embed_dim, bias=False)
+         self.v = nn.Linear(embed_dim, embed_dim, bias=True)
+         self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True)
+
+     def forward(self, x, xa=None, mask=None):
+         q = self.q(x)
+         k = self.k(x if xa is None else xa)
+         v = self.v(x if xa is None else xa)
+
+         q = rearrange(q, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
+         k = rearrange(k, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
+         v = rearrange(v, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
+
+         attn = scaled_dot_product_attention(q, k, v, is_causal=mask is not None)
+         attn = rearrange(attn, 'b h l d -> b l (h d)')
+
+         out = self.out_proj(attn)
+         return out
+
+ class Attention(nn.Module):
+     def __init__(self, embed_dim, num_heads):
+         super().__init__()
+         self.attn = MHSA(embed_dim=embed_dim, num_heads=num_heads)
+         self.cross_attn = MHSA(embed_dim=embed_dim, num_heads=num_heads)
+         self.norm1 = nn.LayerNorm(embed_dim, eps=1e-5)
+         self.norm2 = nn.LayerNorm(embed_dim, eps=1e-5)
+
+     def forward(self, x: Tensor, xa: Optional[Tensor] = None):
+         x = x + self.attn(self.norm1(x))
+         x = x + self.cross_attn(x=self.norm2(x), xa=xa)
+         return x
+
+ class Downsampler(nn.Module):
+     def __init__(self, embed_dim: int):
+         super().__init__()
+         self.conv1 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, padding=1)
+         self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
+         self.ln_post = nn.LayerNorm(embed_dim, eps=1e-5)
+
+     def forward(self, x: Tensor):
+         x = F.gelu(self.conv1(x))
+         x = F.gelu(self.conv2(x))
+         x = x.permute(0, 2, 1)
+         x = self.ln_post(x)
+         return x
+
+ # --- Speech Encoder Module ---
+
+ class SpeechEncoder(nn.Module):
+     def __init__(self, config: FastSLMConfig):
+         super().__init__()
+         # Initialize the Whisper encoder from its specific sub-configuration
+         self._device = 'cuda' if torch.cuda.is_available() else 'cpu'
+         self.whisper = AudioEncoder(
+             n_mels=config.encoder_config.n_mels,
+             n_ctx=config.encoder_config.n_ctx,
+             n_state=config.encoder_config.n_state,
+             n_head=config.encoder_config.n_head,
+             n_layer=config.encoder_config.n_layer
+         )
+         self.n_mels = config.encoder_config.n_mels
+         # Freeze the Whisper encoder as it's not trained
+         for param in self.whisper.parameters():
+             param.requires_grad = False
+
+         # Initialize the projection layer to match the LLM's hidden dimension
+         self.llm_proj = nn.Linear(config.encoder_config.n_state, config.llm_config.hidden_size)
+
+         # Initialize the hierarchical compressors using parameters from the config
+         num_heads = config.encoder_config.n_head
+         stage_tokens = config.encoder_config.stage_tokens
+         self.compression_size = config.encoder_config.compression_size
+         self.n_state = config.encoder_config.n_state
+         self.low_resource = config.low_resource
+
+         self.compressor1 = Compressor(config.encoder_config.n_state, num_heads, stage_tokens[0], 1500)
+         self.stage1 = Downsampler(config.encoder_config.n_state)
+         self.compressor2 = Compressor(config.encoder_config.n_state, num_heads, stage_tokens[1], 750)
+         self.stage2 = Downsampler(config.encoder_config.n_state)
+         self.compressor3 = Compressor(config.encoder_config.n_state, num_heads, stage_tokens[2], 375)
+         self.compressor = Compressor(config.encoder_config.n_state, num_heads, self.compression_size, sum(stage_tokens))
+
+         self.out_attn = nn.ModuleList([
+             Attention(config.encoder_config.n_state, num_heads) for _ in range(2)
+         ])
+
+     def embed_audio(self, mel: torch.Tensor):
+         output = self.whisper(mel)
+         # return output.last_hidden_state
+         return output
+
+     def forward(self, wav_list: List[torch.Tensor]):
+         if len(wav_list) <= 1:
+             speech_features = self.process_audio_for_llm_input(wav_list[0])  # index the single waveform so a one-element list works
+             speech_attn_mask = torch.zeros(1, speech_features.size(1)).bool().to(speech_features.device)
+             return speech_features, speech_attn_mask
+         else:
+             speech_features = []
+             speech_attn_mask = []
+             for wav in wav_list:
+                 speech_feature = self.process_audio_for_llm_input(wav)
+                 speech_features.append(speech_feature)
+                 speech_attn_mask.append(torch.zeros(1, speech_feature.size(1)).bool())
+
+             speech_features = self.pad_sequence(speech_features, padding_side='right', padding_value=0.0)
+             speech_attn_mask = self.pad_sequence(speech_attn_mask, padding_side='right', padding_value=True).squeeze(1)
+             return speech_features, speech_attn_mask
+
+     def process_audio_for_llm_input(self, wav: torch.Tensor):
+         n_frames = 3000
+         min_length = 16000
+         wav = wav.flatten()
+
+         if wav.shape[0] < min_length:
+             wav = F.pad(wav, (0, min_length - wav.shape[0]))
+
+         mels = whisper.log_mel_spectrogram(wav, n_mels=self.n_mels).unsqueeze(0).to(self._device)
+         if mels.shape[-1] > n_frames:
+             mel_segments = []
+             # Segment and process long audio
+             for i in range(0, mels.shape[-1], n_frames):
+                 mel = mels[:, :, i:i+n_frames]
+                 if mel.shape[-1] < n_frames:
+                     mel = self.pad_or_trim(mel, n_frames)
+                 mel_segments.append(mel)
+
+             if self.low_resource:
+                 audio_features = [self._process_mel_segment(mel) for mel in mel_segments]
+                 speech_tokens = torch.cat(audio_features, dim=1)
+             else:
+                 # Batch Inference Mode
+                 mel_segments = torch.cat(mel_segments, dim=0)
+                 B, _, _ = mel_segments.shape
+                 audio_features = self._process_mel_segment(mel_segments)
+                 speech_tokens = audio_features.view(1, B * self.compression_size, self.n_state)
+         else:
+             if mels.shape[-1] < n_frames:
+                 mels = self.pad_or_trim(mels, n_frames)
+             speech_tokens = self._process_mel_segment(mels)
+
+         return self.llm_proj(speech_tokens)
+
+     def _process_mel_segment(self, mel_segment: torch.Tensor):
+         # Feature extraction and hierarchical compression
+         audio_feature = self.embed_audio(mel_segment)
+
+         stage_1_token = self.compressor1(x=audio_feature)
+         stage_1_feature = self.stage1(audio_feature.transpose(1, 2))
+         stage_2_token = self.compressor2(x=stage_1_feature)
+         stage_2_feature = self.stage2(stage_1_feature.transpose(1, 2))
+         stage_3_token = self.compressor3(x=stage_2_feature)
+
+         stage_tokens = torch.cat([stage_1_token, stage_2_token, stage_3_token], dim=1)
+         compressed_tokens = self.compressor(stage_tokens)
+
+         # Cross-attention with hierarchical features
+         h_audio_feature = torch.cat([audio_feature, stage_1_feature, stage_2_feature], dim=1)
+         for block in self.out_attn:
+             compressed_tokens = block(x=compressed_tokens, xa=h_audio_feature)
+
+         return compressed_tokens
+
+     def pad_sequence(self, sequences, padding_side='right', padding_value=0.0):
+         max_len = max(seq.size(1) for seq in sequences)
+         output_dims = (len(sequences), max_len) + sequences[0].shape[2:]
+         output = torch.full(output_dims, padding_value, dtype=sequences[0].dtype, device=sequences[0].device)
+
+         for i, seq in enumerate(sequences):
+             length = seq.size(1)
+             if padding_side == 'right':
+                 output[i, :length, ...] = seq
+             else:
+                 output[i, -length:, ...] = seq
+         return output
+
+     def pad_or_trim(self, array, length: int = 480000, *, axis: int = -1):
+         """
+         Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
+         """
+         if torch.is_tensor(array):
+             pad_widths = [(0, 0)] * array.ndim
+             pad_widths[axis] = (0, length - array.shape[axis])
+             array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
+         else:
+             pad_widths = [(0, 0)] * array.ndim
+             pad_widths[axis] = (0, length - array.shape[axis])
+             array = np.pad(array, pad_widths)
+         return array
+ # --- Main Model Class ---
+
+ class FastSLMPreTrainedModel(PreTrainedModel):
+     config_class = FastSLMConfig
+     base_model_prefix = "fastslm"
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             nn.init.normal_(module.weight, std=0.02)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+
+ class FastSLMForConditionalGeneration(FastSLMPreTrainedModel, GenerationMixin):
+     config_class = FastSLMConfig
+     def __init__(self, config: FastSLMConfig):
+         super().__init__(config)
+
+         # Initialize the two main components using their respective sub-configs
+         self.encoder = SpeechEncoder(config)
+         self.llm = AutoModelForCausalLM.from_config(
+             config.llm_config,
+             trust_remote_code=True
+         )
+         if self.llm._tied_weights_keys is not None:
+             self._tied_weights_keys = [f"llm.{k}" for k in self.llm._tied_weights_keys]
+
+         llm_lora_config = LoraConfig(
+             r=config.lora_r,
+             lora_alpha=config.lora_a,
+             target_modules=config.llm_modules,
+             lora_dropout=0.01,
+             task_type="CAUSAL_LM",
+         )
+         self.llm = get_peft_model(self.llm, llm_lora_config)
+
+         self.tokenizer = AutoTokenizer.from_pretrained(config.llm_config._name_or_path, use_fast=False, trust_remote_code=True)
+         # Add special tokens
+         audio_token = ['<|AUDIO|>', '<|audio_bos|>', '<|audio_eos|>']
+         task_token = ['<|ASR|>', '<|AST|>', '<|SSUM|>', '<|SQQA|>']
+         language_token = [f"<|{lang.upper()}|>" for lang in LANGUAGES]
+         special_tokens = audio_token + language_token + task_token
+         self.tokenizer.add_special_tokens({"additional_special_tokens": special_tokens})
+
+     def get_input_embeddings(self) -> nn.Module:
+         """Returns the input embedding layer of the LLM."""
+         return self.llm.get_input_embeddings()
+
+     def set_input_embeddings(self, value: nn.Module):
+         """Sets the input embedding layer of the LLM."""
+         self.llm.set_input_embeddings(value)
+
+     def process_audio(self, audio_array: np.ndarray, sample_rate: int) -> torch.Tensor:
+         audio = torch.tensor(audio_array, dtype=torch.float32)
+         if sample_rate != 16000:
+             resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
+             audio = resampler(audio)
+         return audio
+
+     def save_pretrained(self, save_directory, **kwargs):
+         super().save_pretrained(save_directory, **kwargs)
+         if hasattr(self.llm, "save_pretrained"):
+             self.llm.save_pretrained(f"{save_directory}/llm")
+
+     def forward(
+         self,
+         audio: List[torch.Tensor],
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         **kwargs
+     ):
+         speech_query, speech_attn_mask = self.encoder(audio)
+
+         token_embedding = self.llm.get_input_embeddings()
+
+         # Create speech labels (-100 to ignore in loss calculation)
+         speech_label_len = int(speech_query.shape[1])
+         speech_labels = torch.full(
+             (speech_query.shape[0], speech_label_len),
+             fill_value=-100,
+             dtype=torch.long,
+             device=speech_query.device
+         )
+
+         audio_token_id = self.tokenizer.convert_tokens_to_ids("<|AUDIO|>")
+         idx = torch.nonzero(input_ids[0] == audio_token_id)[0][0].item()
+         left_token, right_token = input_ids[:, :idx], input_ids[:, idx+1:]
+
+         left_label, right_label = labels[:, :idx], labels[:, idx+1:]
+         left_embed = token_embedding(left_token.long()).to(speech_query.device)
+         right_embed = token_embedding(right_token.long()).to(speech_query.device)
+
+         left_mask = (left_token != self.tokenizer.pad_token_id).long().to(self.device)
+         right_mask = (right_token != self.tokenizer.pad_token_id).long().to(self.device)
+         speech_attn_mask = (speech_attn_mask.int() <= 0).long()
+
+         inputs_embeds = torch.cat([left_embed, speech_query, right_embed], dim=1)
+         labels = torch.cat([left_label, speech_labels, right_label], dim=1).long()
+         attention_mask = torch.cat([
+             left_mask, speech_attn_mask, right_mask
+             ], dim=1
+         )
+
+         outputs = self.llm(
+             inputs_embeds=inputs_embeds,
+             attention_mask=attention_mask,
+             labels=labels,
+             return_dict=True,
+         )
+         return outputs
+
+     def generate(self, input_ids, audio: List[torch.Tensor] = None, **kwargs):
+         token_embedding = self.llm.get_input_embeddings()
+         if audio is not None:
+             speech_query, speech_attn_mask = self.encoder(audio)
+             audio_token_id = self.tokenizer.convert_tokens_to_ids("<|AUDIO|>")
+             idx = torch.nonzero(input_ids[0] == audio_token_id)[0][0].item()
+
+             left_embed = token_embedding(input_ids[:, :idx])
+             right_embed = token_embedding(input_ids[:, idx+1:])
+
+             input_embeds = torch.cat([left_embed, speech_query, right_embed], dim=1)
+
+             # Create attention mask
+             left_mask = torch.ones_like(input_ids[:, :idx]).to(input_ids.device)
+             right_mask = torch.ones_like(input_ids[:, idx+1:]).to(input_ids.device)
+             attention_mask = torch.cat([left_mask, (~speech_attn_mask).long().to(input_ids.device), right_mask], dim=1)
+
+             generated_ids = self.llm.generate(
+                 inputs_embeds=input_embeds,
+                 attention_mask=attention_mask,
+                 pad_token_id=self.tokenizer.eos_token_id,
+                 **kwargs
+             )
+         else:
+             input_embeds = token_embedding(input_ids)
+             attention_mask = torch.ones([
+                 input_embeds.size(0), input_embeds.size(1)], dtype=torch.long, device=input_embeds.device
+             )
+             with self.llm.disable_adapter():
+                 generated_ids = self.llm.generate(
+                     inputs_embeds=input_embeds,
+                     attention_mask=attention_mask,
+                     pad_token_id=self.tokenizer.eos_token_id,
+                     **kwargs
+                 )
+         return generated_ids
+
+     def pad_embeddings(self, sequences, padding_side='right', padding_value=0.0):
+         """Pads a list of tensors to the same length."""
+         max_len = max(seq.size(0) for seq in sequences)
+         output_dims = (len(sequences), max_len) + sequences[0].shape[1:]
+         output = torch.full(output_dims, padding_value, dtype=sequences[0].dtype, device=sequences[0].device)
+
+         for i, seq in enumerate(sequences):
+             length = seq.size(0)
+             if padding_side == 'right':
+                 output[i, :length, ...] = seq
+             else:
+                 output[i, -length:, ...] = seq
+         return output
+
+ # Register the model with AutoModelForCausalLM
+ AutoConfig.register("fastslm", FastSLMConfig)
+ AutoModelForCausalLM.register(FastSLMConfig, FastSLMForConditionalGeneration)
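
Taken together with the registration at the end of the file, a rough end-to-end inference sketch (the repo id, prompt layout, and audio path below are assumptions; the prompt must contain exactly one <|AUDIO|> token, which generate() swaps for the compressed speech embeddings):

# Hypothetical end-to-end sketch; repo id, prompt format, and file path are placeholders.
import torch
import torchaudio
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "speechisalluneed/FastSLM", trust_remote_code=True
).eval()

wav, sr = torchaudio.load("sample.wav")                      # (channels, samples)
wav = model.process_audio(wav.mean(dim=0).numpy(), sr)       # mono, resampled to 16 kHz

prompt = "<|ASR|><|EN|><|audio_bos|><|AUDIO|><|audio_eos|>"  # assumed task/prompt layout
input_ids = model.tokenizer(prompt, return_tensors="pt").input_ids

with torch.no_grad():
    out_ids = model.generate(input_ids, audio=[wav], max_new_tokens=128)
print(model.tokenizer.decode(out_ids[0], skip_special_tokens=True))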