speechisalluneed committed
Commit 52e95e1 · verified · 1 Parent(s): 13e1a8a

Upload FastALM model (hf_model folder)

__init__.py ADDED
@@ -0,0 +1,7 @@
+ from .configuration_fastalm import FastALMConfig
+ from .modeling_fastalm import FastALMForConditionalGeneration  # FastALMForCausalLM
+
+ from transformers import AutoConfig, AutoModelForCausalLM
+
+ AutoConfig.register("fastalm", FastALMConfig)
+ AutoModelForCausalLM.register(FastALMConfig, FastALMForConditionalGeneration)
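A minimal usage sketch. Assumptions: this folder is importable as a Python package (called `hf_model` here, any name works) and `path/to/hf_model` points at a local copy of the checkpoint; importing the package runs the registrations above so the `Auto*` classes can resolve the custom `fastalm` model type.

```python
import hf_model  # noqa: F401  -- executes __init__.py, registering FastALMConfig / FastALMForConditionalGeneration

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("path/to/hf_model")           # model_type "fastalm" -> FastALMConfig
model = AutoModelForCausalLM.from_pretrained("path/to/hf_model")  # -> FastALMForConditionalGeneration
```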
added_tokens.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|ASR|>": 151674,
+   "<|AST|>": 151675,
+   "<|AUDIO|>": 151669,
+   "<|EN|>": 151672,
+   "<|KO|>": 151673,
+   "<|SQQA|>": 151677,
+   "<|SSUM|>": 151676,
+   "<|audio_bos|>": 151670,
+   "<|audio_eos|>": 151671,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
config.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "architectures": [
+     "FastALMForConditionalGeneration"
+   ],
+   "encoder_config": {
+     "compression_size": 50,
+     "model_type": "fastalm_speech_encoder",
+     "n_ctx": 1500,
+     "n_head": 20,
+     "n_layer": 32,
+     "n_mels": 128,
+     "n_state": 1280,
+     "stage_tokens": [
+       80,
+       80,
+       80
+     ]
+   },
+   "llm_config": {
+     "_name_or_path": "Qwen/Qwen3-4B",
+     "architectures": [
+       "Qwen3ForCausalLM"
+     ],
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "bos_token_id": 151643,
+     "eos_token_id": 151645,
+     "head_dim": 128,
+     "hidden_act": "silu",
+     "hidden_size": 2560,
+     "initializer_range": 0.02,
+     "intermediate_size": 9728,
+     "max_position_embeddings": 40960,
+     "max_window_layers": 36,
+     "model_type": "qwen3",
+     "num_attention_heads": 32,
+     "num_hidden_layers": 36,
+     "num_key_value_heads": 8,
+     "rms_norm_eps": 1e-06,
+     "rope_scaling": null,
+     "rope_theta": 1000000,
+     "sliding_window": null,
+     "tie_word_embeddings": true,
+     "torch_dtype": "bfloat16",
+     "use_cache": true,
+     "use_sliding_window": false,
+     "vocab_size": 151936
+   },
+   "llm_modules": [
+     "q_proj",
+     "k_proj",
+     "v_proj",
+     "o_proj",
+     "gate_proj",
+     "up_proj",
+     "down_proj"
+   ],
+   "lora_a": 64,
+   "lora_r": 16,
+   "low_resource": false,
+   "model_type": "fastalm",
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3"
+ }
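Reading the numbers above: each 30-second mel segment (1500 encoder frames) is summarized by `stage_tokens` = 80 + 80 + 80 query tokens across three downsampling stages, then compressed to `compression_size` = 50 audio embeddings of width `n_state` = 1280 and projected to the Qwen3-4B hidden size of 2560. A brief sketch of inspecting this through the registered config class (assumes the registration import shown earlier and a local copy of the checkpoint; resolving `llm_config` fetches the Qwen/Qwen3-4B config from the Hub or cache):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("path/to/hf_model")          # model_type "fastalm" -> FastALMConfig
print(cfg.encoder_config.stage_tokens)                        # [80, 80, 80]
print(cfg.encoder_config.compression_size)                    # 50 audio tokens per 30 s segment
print(cfg.llm_config.model_type, cfg.llm_config.hidden_size)  # qwen3 2560
```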
configuration_fastalm.py ADDED
@@ -0,0 +1,63 @@
+ from transformers import PretrainedConfig, AutoConfig
+
+ class FastALMSpeechEncoderConfig(PretrainedConfig):
+     model_type = "fastalm_speech_encoder"
+     def __init__(
+         self,
+         n_mels=128,
+         n_ctx=1500,
+         n_state=1280,
+         n_head=20,
+         n_layer=32,
+         stage_tokens=[80, 80, 80],
+         compression_size=50,
+         **kwargs
+     ):
+         super().__init__(**kwargs)
+         self.n_mels = n_mels
+         self.n_ctx = n_ctx
+         self.n_state = n_state
+         self.n_head = n_head
+         self.n_layer = n_layer
+         self.stage_tokens = stage_tokens
+         self.compression_size = compression_size
+
+ class FastALMConfig(PretrainedConfig):
+     model_type = "fastalm"
+     def __init__(
+         self,
+         encoder_config=None,
+         llm_config=None,
+         lora_r=16,
+         lora_a=64,
+         llm_modules=None,
+         low_resource=False,
+         **kwargs
+     ):
+         # Default llm_modules
+         if llm_modules is None:
+             llm_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
+
+         # Handle the LLM config: convert a plain dict into a config object via AutoConfig
+         if llm_config is None:
+             llm_config = AutoConfig.from_pretrained("Qwen/Qwen3-4B")
+         elif isinstance(llm_config, dict):
+             if "_name_or_path" in llm_config:
+                 llm_config = AutoConfig.from_pretrained(llm_config["_name_or_path"], **llm_config)
+             else:
+                 llm_config = AutoConfig.for_model(llm_config.get("model_type", "qwen3"), **llm_config)  # AutoConfig has no from_dict()
+
+         # Handle the encoder config
+         if encoder_config is None:
+             encoder_config = FastALMSpeechEncoderConfig()
+         elif isinstance(encoder_config, dict):
+             encoder_config = FastALMSpeechEncoderConfig(**encoder_config)
+
+         self.llm_config = llm_config
+         self.encoder_config = encoder_config
+         self.lora_r = lora_r
+         self.lora_a = lora_a
+         self.llm_modules = llm_modules
+         self.low_resource = low_resource
+
+         super().__init__(**kwargs)
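A small sketch of how the sub-configs are resolved when they arrive as plain dicts, as stored in config.json (assumes `configuration_fastalm.py` is importable and that building the Qwen3 sub-config can reach `Qwen/Qwen3-4B` or a local cache):

```python
from configuration_fastalm import FastALMConfig

# Dict sub-configs are converted to config objects inside __init__.
cfg = FastALMConfig(
    encoder_config={"n_mels": 128, "n_state": 1280, "stage_tokens": [80, 80, 80], "compression_size": 50},
    llm_config={"_name_or_path": "Qwen/Qwen3-4B"},
    lora_r=16,
    lora_a=64,
)
print(type(cfg.encoder_config).__name__)  # FastALMSpeechEncoderConfig
print(type(cfg.llm_config).__name__)      # Qwen3Config
```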
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.51.3"
+ }
llm/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen3-4B
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.14.0
llm/adapter_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen3-4B",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.01,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "up_proj",
+     "v_proj",
+     "k_proj",
+     "down_proj",
+     "o_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
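A hedged sketch of loading this adapter on its own with PEFT (inside FastALM the adapter is attached by `modeling_fastalm.py`; the local path `llm` is this sub-folder of the checkpoint):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Base model named in adapter_config.json; LoRA with r=16, lora_alpha=64, dropout=0.01
# on the attention and MLP projections of every layer.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-4B", torch_dtype=torch.bfloat16)

# "llm" holds adapter_config.json and adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "llm")
model = model.merge_and_unload()  # optional: fold the LoRA weights into the base model
```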
llm/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8951e9e07a241a8ab4c6d5b48ebaf61ff92b631e45e0489ad9fd10ff1bf4d3a1
+ size 132187888
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3cafda96beb1da73da4da9c09b8931275c33f27a5e9751a913cc1f23fecd35e
+ size 4954711528
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbb67257e183ec358f935dab7143df6c047c9d852561a75af932f4fb3f2982d0
+ size 4983450856
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0599cd20bcf1eaa56bc26aaeb241a7ac81c1b7dce1cc535ecc4b1b81e365708c
+ size 1805593888
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_fastalm.py ADDED
@@ -0,0 +1,447 @@
1
+ # FastALM/modeling_fastalm.py
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torchaudio
6
+ import torch.nn.functional as F
7
+ import numpy as np
8
+ from torch import Tensor
9
+ import whisper
10
+ from einops import rearrange
11
+ from typing import Optional, List
12
+
13
+ from peft import (
14
+ LoraConfig,
15
+ get_peft_model
16
+ )
17
+ from transformers import (
18
+ AutoModelForCausalLM,
19
+ AutoTokenizer,
20
+ PreTrainedModel,
21
+ GenerationMixin,
22
+ AutoConfig
23
+ )
24
+ from .modeling_whisper import AudioEncoder
25
+ from .configuration_fastalm import FastALMConfig
26
+ # Check for scaled_dot_product_attention availability
27
+ try:
28
+ from torch.nn.functional import scaled_dot_product_attention
29
+ SDPA_AVAILABLE = True
30
+ except (ImportError, RuntimeError, OSError):
31
+ scaled_dot_product_attention = None
32
+ SDPA_AVAILABLE = False
33
+
34
+ LANGUAGES = {
35
+ "en": "english",
36
+ "ko": "korean"
37
+ }
38
+
39
+ def set_trainable_parameters(module, requires_grad=False):
40
+ for param in module.parameters():
41
+ param.requires_grad = requires_grad
42
+ module._requires_grad = requires_grad
43
+
44
+ # --- Helper Modules (Compressor, MHSA, Attention, Downsampler) ---
45
+
46
+ class Compressor(nn.Module):
47
+ def __init__(self, embed_dim, num_heads, num_query, n_ctx):
48
+ super().__init__()
49
+ self.num_heads = num_heads
50
+ self.head_dims = embed_dim // num_heads
51
+ self.n_ctx = n_ctx
52
+
53
+ self.query = nn.Parameter(torch.randn(1, num_query, embed_dim))
54
+ nn.init.normal_(self.query, mean=0.0, std=0.02)
55
+
56
+ self.q_ln = nn.LayerNorm(embed_dim, eps=1e-5)
57
+ self.kv_ln = nn.LayerNorm(embed_dim, eps=1e-5)
58
+
59
+ self.kv_proj = nn.Identity()
60
+ self.out_proj = nn.Linear(embed_dim, embed_dim)
61
+
62
+ self.register_buffer("q_pos_embeds", self.sinusoids(num_query, embed_dim))
63
+ self.register_buffer("kv_pos_embeds", self.sinusoids(n_ctx, embed_dim))
64
+
65
+ self.init_weights()
66
+
67
+ def init_weights(self):
68
+ nn.init.constant_(self.q_ln.bias, 0)
69
+ nn.init.constant_(self.q_ln.weight, 1.0)
70
+ nn.init.constant_(self.kv_ln.bias, 0)
71
+ nn.init.constant_(self.kv_ln.weight, 1.0)
72
+
73
+ def sinusoids(self, length, channels, max_timescale=10000):
74
+ assert channels % 2 == 0
75
+ log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
76
+ inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
77
+ scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
78
+ return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
79
+
80
+ def forward(self, x: Tensor):
81
+ q = self.q_ln(self.query.to(x.device))
82
+ x = self.kv_ln(self.kv_proj(x))
83
+
84
+ q = rearrange(q + self.q_pos_embeds, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
85
+ k = rearrange(x + self.kv_pos_embeds, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
86
+ v = rearrange(x, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
87
+
88
+ attn = scaled_dot_product_attention(q, k, v)
89
+ attn = rearrange(attn, 'b h l d -> b l (h d)')
90
+ x = self.out_proj(attn)
91
+ return x
92
+
93
+ class MHSA(nn.Module):
94
+ def __init__(self, embed_dim, num_heads):
95
+ super().__init__()
96
+ self.embed_dim = embed_dim
97
+ self.num_heads = num_heads
98
+ self.head_dims = embed_dim // num_heads
99
+ self.q = nn.Linear(embed_dim, embed_dim, bias=True)
100
+ self.k = nn.Linear(embed_dim, embed_dim, bias=False)
101
+ self.v = nn.Linear(embed_dim, embed_dim, bias=True)
102
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True)
103
+
104
+ def forward(self, x, xa=None, mask=None):
105
+ q = self.q(x)
106
+ k = self.k(x if xa is None else xa)
107
+ v = self.v(x if xa is None else xa)
108
+
109
+ q = rearrange(q, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
110
+ k = rearrange(k, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
111
+ v = rearrange(v, 'b l (h d) -> b h l d', h=self.num_heads, d=self.head_dims)
112
+
113
+ attn = scaled_dot_product_attention(q, k, v, is_causal=mask is not None)
114
+ attn = rearrange(attn, 'b h l d -> b l (h d)')
115
+
116
+ out = self.out_proj(attn)
117
+ return out
118
+
119
+ class Attention(nn.Module):
120
+ def __init__(self, embed_dim, num_heads):
121
+ super().__init__()
122
+ self.attn = MHSA(embed_dim=embed_dim, num_heads=num_heads)
123
+ self.cross_attn = MHSA(embed_dim=embed_dim, num_heads=num_heads)
124
+ self.norm1 = nn.LayerNorm(embed_dim, eps=1e-5)
125
+ self.norm2 = nn.LayerNorm(embed_dim, eps=1e-5)
126
+
127
+ def forward(self, x: Tensor, xa: Optional[Tensor] = None):
128
+ x = x + self.attn(self.norm1(x))
129
+ x = x + self.cross_attn(x=self.norm2(x), xa=xa)
130
+ return x
131
+
132
+ class Downsampler(nn.Module):
133
+ def __init__(self, embed_dim: int):
134
+ super().__init__()
135
+ self.conv1 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, padding=1)
136
+ self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
137
+ self.ln_post = nn.LayerNorm(embed_dim, eps=1e-5)
138
+
139
+ def forward(self, x: Tensor):
140
+ x = F.gelu(self.conv1(x))
141
+ x = F.gelu(self.conv2(x))
142
+ x = x.permute(0, 2, 1)
143
+ x = self.ln_post(x)
144
+ return x
145
+
146
+ # --- Speech Encoder Module ---
147
+
148
+ class SpeechEncoder(nn.Module):
149
+ def __init__(self, config: FastALMConfig):
150
+ super().__init__()
151
+ # Initialize the Whisper encoder from its specific sub-configuration
152
+ self.whisper = AudioEncoder(
153
+ n_mels=config.encoder_config.n_mels,
154
+ n_ctx=config.encoder_config.n_ctx,
155
+ n_state=config.encoder_config.n_state,
156
+ n_head=config.encoder_config.n_head,
157
+ n_layer=config.encoder_config.n_layer
158
+ )
159
+ self.n_mels = config.encoder_config.n_mels
160
+ # Freeze the Whisper encoder as it's not trained
161
+ for param in self.whisper.parameters():
162
+ param.requires_grad = False
163
+
164
+ # Initialize the projection layer to match the LLM's hidden dimension
165
+ self.llm_proj = nn.Linear(config.encoder_config.n_state, config.llm_config.hidden_size)
166
+
167
+ # Initialize the hierarchical compressors using parameters from the config
168
+ num_heads = config.encoder_config.n_head
169
+ stage_tokens = config.encoder_config.stage_tokens
170
+ self.compression_size = config.encoder_config.compression_size
171
+ self.n_state = config.encoder_config.n_state
172
+ self.low_resource = config.low_resource
173
+
174
+ self.compressor1 = Compressor(config.encoder_config.n_state, num_heads, stage_tokens[0], 1500)
175
+ self.stage1 = Downsampler(config.encoder_config.n_state)
176
+ self.compressor2 = Compressor(config.encoder_config.n_state, num_heads, stage_tokens[1], 750)
177
+ self.stage2 = Downsampler(config.encoder_config.n_state)
178
+ self.compressor3 = Compressor(config.encoder_config.n_state, num_heads, stage_tokens[2], 375)
179
+ self.compressor = Compressor(config.encoder_config.n_state, num_heads, self.compression_size, sum(stage_tokens))
180
+
181
+ self.out_attn = nn.ModuleList([
182
+ Attention(config.encoder_config.n_state, num_heads) for _ in range(2)
183
+ ])
184
+
185
+ def embed_audio(self, mel: torch.Tensor):
186
+ output = self.whisper(mel)
187
+ # return output.last_hidden_state
188
+ return output
189
+
190
+ def forward(self, wav_list: List[torch.Tensor]):
191
+ if len(wav_list) <= 1:
192
+ speech_features = self.process_audio_for_llm_input(wav_list[0])
193
+ speech_attn_mask = torch.zeros(1,speech_features.size(1)).bool().to(speech_features.device)
194
+ return speech_features, speech_attn_mask
195
+ else:
196
+ speech_features = []
197
+ speech_attn_mask = []
198
+ for wav in wav_list:
199
+ speech_feature = self.process_audio_for_llm_input(wav)
200
+ speech_features.append(speech_feature)
201
+ speech_attn_mask.append(torch.zeros(1,speech_feature.size(1)).bool())
202
+
203
+ speech_features = self.pad_sequence(speech_features,padding_side='right',padding_value=0.0)
204
+ speech_attn_mask = self.pad_sequence(speech_attn_mask,padding_side='right',padding_value=True).squeeze(1)
205
+ return speech_features, speech_attn_mask
206
+
207
+ def process_audio_for_llm_input(self, wav: torch.Tensor):
208
+ n_frames = 3000
209
+ wav = wav.flatten()
210
+ mels = whisper.log_mel_spectrogram(wav, n_mels=self.n_mels).unsqueeze(0)
211
+ if mels.shape[-1] > n_frames:
212
+ mel_segments = []
213
+ # Segment and process long audio
214
+ for i in range(0, mels.shape[-1], n_frames):
215
+ mel = mels[:,:,i:i+n_frames]
216
+ if mel.shape[-1] < n_frames:
217
+ mel = self.pad_or_trim(mel,n_frames)
218
+ mel_segments.append(mel)
219
+
220
+ if self.low_resource:
221
+ audio_features = [self._process_mel_segment(mel) for mel in mel_segments]
222
+ speech_tokens = torch.cat(audio_features, dim=1)
223
+ else:
224
+ # Batch Inference Mode
225
+ mel_segments = torch.cat(mel_segments,dim=0)
226
+ B, _, _ = mel_segments.shape
227
+ audio_features = self._process_mel_segment(mel_segments)
228
+ speech_tokens = audio_features.view(1, B * self.compression_size, self.n_state)
229
+ else:
230
+ if mels.shape[-1] < n_frames:
231
+ mels = self.pad_or_trim(mels,n_frames)
232
+ speech_tokens = self._process_mel_segment(mels)
233
+
234
+ return self.llm_proj(speech_tokens)
235
+
236
+ def _process_mel_segment(self, mel_segment: torch.Tensor):
237
+ # Feature extraction and hierarchical compression
238
+ audio_feature = self.embed_audio(mel_segment)
239
+
240
+ stage_1_token = self.compressor1(x=audio_feature)
241
+ stage_1_feature = self.stage1(audio_feature.transpose(1, 2))
242
+ stage_2_token = self.compressor2(x=stage_1_feature)
243
+ stage_2_feature = self.stage2(stage_1_feature.transpose(1, 2))
244
+ stage_3_token = self.compressor3(x=stage_2_feature)
245
+
246
+ stage_tokens = torch.cat([stage_1_token, stage_2_token, stage_3_token], dim=1)
247
+ compressed_tokens = self.compressor(stage_tokens)
248
+
249
+ # Cross-attention with hierarchical features
250
+ h_audio_feature = torch.cat([audio_feature, stage_1_feature, stage_2_feature], dim=1)
251
+ for block in self.out_attn:
252
+ compressed_tokens = block(x=compressed_tokens, xa=h_audio_feature)
253
+
254
+ return compressed_tokens
255
+
256
+ def pad_sequence(self, sequences, padding_side='right', padding_value=0.0):
257
+ max_len = max(seq.size(1) for seq in sequences)
258
+ output_dims = (len(sequences), max_len) + sequences[0].shape[2:]
259
+ output = torch.full(output_dims, padding_value, dtype=sequences[0].dtype, device=sequences[0].device)
260
+
261
+ for i, seq in enumerate(sequences):
262
+ length = seq.size(1)
263
+ if padding_side == 'right':
264
+ output[i, :length, ...] = seq
265
+ else:
266
+ output[i, -length:, ...] = seq
267
+ return output
268
+
269
+ def pad_or_trim(self, array, length: int = 480000, *, axis: int = -1):
270
+ """
271
+ Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
272
+ """
273
+ if torch.is_tensor(array):
274
+ pad_widths = [(0, 0)] * array.ndim
275
+ pad_widths[axis] = (0, length - array.shape[axis])
276
+ array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
277
+ else:
278
+ pad_widths = [(0, 0)] * array.ndim
279
+ pad_widths[axis] = (0, length - array.shape[axis])
280
+ array = np.pad(array, pad_widths)
281
+ return array
282
+ # --- Main Model Class ---
283
+
284
+ class FastALMPreTrainedModel(PreTrainedModel):
285
+ config_class = FastALMConfig
286
+ base_model_prefix = "fastalm"
287
+
288
+ def _init_weights(self, module):
289
+ if isinstance(module, nn.Linear):
290
+ nn.init.normal_(module.weight, std=0.02)
291
+ if module.bias is not None:
292
+ nn.init.zeros_(module.bias)
293
+
294
+ class FastALMForConditionalGeneration(FastALMPreTrainedModel, GenerationMixin):
295
+ config_class = FastALMConfig
296
+ def __init__(self, config: FastALMConfig):
297
+ super().__init__(config)
298
+
299
+ # Initialize the two main components using their respective sub-configs
300
+ self.encoder = SpeechEncoder(config)
301
+ self.llm = AutoModelForCausalLM.from_config(
302
+ config.llm_config,
303
+ trust_remote_code=True
304
+ )
305
+ if self.llm._tied_weights_keys is not None:
306
+ self._tied_weights_keys = [f"llm.{k}" for k in self.llm._tied_weights_keys]
307
+
308
+ llm_lora_config = LoraConfig(
309
+ r=config.lora_r,
310
+ lora_alpha=config.lora_a,
311
+ target_modules=config.llm_modules,
312
+ lora_dropout=0.01,
313
+ task_type="CAUSAL_LM",
314
+ )
315
+ self.llm = get_peft_model(self.llm, llm_lora_config)
316
+
317
+ self.tokenizer = AutoTokenizer.from_pretrained(config.llm_config._name_or_path, use_fast=False, trust_remote_code=True)
318
+ # Add special tokens
319
+ audio_token = ['<|AUDIO|>', '<|audio_bos|>', '<|audio_eos|>']
320
+ task_token = ['<|ASR|>', '<|AST|>', '<|SSUM|>', '<|SQQA|>']
321
+ language_token = [f"<|{lang.upper()}|>" for lang in LANGUAGES]
322
+ special_tokens = audio_token + language_token + task_token
323
+ self.tokenizer.add_special_tokens({"additional_special_tokens": special_tokens})
324
+
325
+ def get_input_embeddings(self) -> nn.Module:
326
+ """Returns the input embedding layer of the LLM."""
327
+ return self.llm.get_input_embeddings()
328
+
329
+ def set_input_embeddings(self, value: nn.Module):
330
+ """Sets the input embedding layer of the LLM."""
331
+ self.llm.set_input_embeddings(value)
332
+
333
+ def process_audio(self, audio_array: np.ndarray, sample_rate: int) -> torch.Tensor:
334
+ audio = torch.tensor(audio_array, dtype=torch.float32)
335
+ if sample_rate != 16000:
336
+ resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
337
+ audio = resampler(audio)
338
+ return audio
339
+
340
+ def save_pretrained(self, save_directory, **kwargs):
341
+ super().save_pretrained(save_directory, **kwargs)
342
+ if hasattr(self.llm, "save_pretrained"):
343
+ self.llm.save_pretrained(f"{save_directory}/llm")
344
+
345
+ def forward(
346
+ self,
347
+ audio: List[torch.Tensor],
348
+ input_ids: torch.LongTensor = None,
349
+ attention_mask: Optional[torch.Tensor] = None,
350
+ inputs_embeds: Optional[torch.FloatTensor] = None,
351
+ labels: Optional[torch.LongTensor] = None,
352
+ **kwargs
353
+ ):
354
+ speech_query, speech_attn_mask = self.encoder(audio)
355
+
356
+ token_embedding = self.llm.get_input_embeddings()
357
+
358
+ # Create speech labels (-100 to ignore in loss calculation)
359
+ speech_label_len = int(speech_query.shape[1])
360
+ speech_labels = torch.full(
361
+ (speech_query.shape[0], speech_label_len),
362
+ fill_value=-100,
363
+ dtype=torch.long,
364
+ device=speech_query.device
365
+ )
366
+
367
+ audio_token_id = self.tokenizer.convert_tokens_to_ids("<|AUDIO|>")
368
+ idx = torch.nonzero(input_ids[0] == audio_token_id)[0][0].item()
369
+ left_token, right_token = input_ids[:,:idx], input_ids[:,idx+1:]
370
+
371
+ left_label, right_label = labels[:,:idx], labels[:,idx+1:]
372
+ left_embed = token_embedding(left_token.long()).to(speech_query.device)
373
+ right_embed = token_embedding(right_token.long()).to(speech_query.device)
374
+
375
+ left_mask = (left_token != self.tokenizer.pad_token_id).long().to(self.device)
376
+ right_mask = (right_token != self.tokenizer.pad_token_id).long().to(self.device)
377
+ speech_attn_mask = (speech_attn_mask.int() <= 0).long()
378
+
379
+ inputs_embeds = torch.cat([left_embed,speech_query,right_embed],dim=1)
380
+ labels = torch.cat([left_label,speech_labels,right_label], dim=1).long()
381
+ attention_mask = torch.cat([
382
+ left_mask, speech_attn_mask, right_mask
383
+ ], dim=1
384
+ )
385
+
386
+ outputs = self.llm(
387
+ inputs_embeds=inputs_embeds,
388
+ attention_mask=attention_mask,
389
+ labels=labels,
390
+ return_dict=True,
391
+ )
392
+ return outputs
393
+
394
+ def generate(self, input_ids, audio: List[torch.Tensor] = None, **kwargs):
395
+ token_embedding = self.llm.get_input_embeddings()
396
+ if audio is not None:
397
+ speech_query, speech_attn_mask = self.encoder(audio)
398
+ audio_token_id = self.tokenizer.convert_tokens_to_ids("<|AUDIO|>")
399
+ idx = torch.nonzero(input_ids[0] == audio_token_id)[0][0].item()
400
+
401
+ left_embed = token_embedding(input_ids[:, :idx])
402
+ right_embed = token_embedding(input_ids[:, idx+1:])
403
+
404
+ input_embeds = torch.cat([left_embed, speech_query, right_embed], dim=1)
405
+
406
+ # Create attention mask
407
+ left_mask = torch.ones_like(input_ids[:, :idx]).to(input_ids.device)
408
+ right_mask = torch.ones_like(input_ids[:, idx+1:]).to(input_ids.device)
409
+ attention_mask = torch.cat([left_mask, (~speech_attn_mask).long(), right_mask], dim=1)
410
+
411
+ generated_ids = self.llm.generate(
412
+ inputs_embeds=input_embeds,
413
+ attention_mask=attention_mask,
414
+ pad_token_id=self.tokenizer.eos_token_id,
415
+ **kwargs
416
+ )
417
+ else:
418
+ input_embeds = token_embedding(input_ids)
419
+ attention_mask = torch.ones([
420
+ input_embeds.size(0), input_embeds.size(1)], dtype=torch.long, device=input_embeds.device
421
+ )
422
+ with self.llm.disable_adapter():
423
+ generated_ids = self.llm.generate(
424
+ inputs_embeds=input_embeds,
425
+ attention_mask=attention_mask,
426
+ pad_token_id=self.tokenizer.eos_token_id,
427
+ **kwargs
428
+ )
429
+ return generated_ids
430
+
431
+ def pad_embeddings(self, sequences, padding_side='right', padding_value=0.0):
432
+ """Pads a list of tensors to the same length."""
433
+ max_len = max(seq.size(0) for seq in sequences)
434
+ output_dims = (len(sequences), max_len) + sequences[0].shape[1:]
435
+ output = torch.full(output_dims, padding_value, dtype=sequences[0].dtype, device=sequences[0].device)
436
+
437
+ for i, seq in enumerate(sequences):
438
+ length = seq.size(0)
439
+ if padding_side == 'right':
440
+ output[i, :length, ...] = seq
441
+ else:
442
+ output[i, -length:, ...] = seq
443
+ return output
444
+
445
+ # Register the model with AutoModelForCausalLM
446
+ AutoConfig.register("fastalm", FastALMConfig)
447
+ AutoModelForCausalLM.register(FastALMConfig, FastALMForConditionalGeneration)
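A hedged end-to-end sketch of speech recognition with this class. The prompt layout (`<|ASR|><|EN|>` task/language tags around the `<|AUDIO|>` placeholder) is an assumption inferred from the special tokens registered above, not documented in this commit; `sample.wav` and the checkpoint path are illustrative. It assumes the repo's Python files are importable, `openai-whisper`, `peft`, and `einops` are installed, and the Qwen/Qwen3-4B tokenizer is downloadable or cached.

```python
import torch
import torchaudio
from modeling_fastalm import FastALMForConditionalGeneration

# Local checkpoint directory holding config.json and the model-*.safetensors shards.
model = FastALMForConditionalGeneration.from_pretrained("path/to/hf_model")
model.eval()

wav, sr = torchaudio.load("sample.wav")        # illustrative input file
audio = model.process_audio(wav.numpy(), sr)   # resamples to 16 kHz if needed

# generate() splices the 50-token audio embedding in place of <|AUDIO|>.
prompt = (
    "<|im_start|>user\n"
    "<|ASR|><|EN|><|audio_bos|><|AUDIO|><|audio_eos|><|im_end|>\n"
    "<|im_start|>assistant\n"
)
input_ids = model.tokenizer(prompt, return_tensors="pt").input_ids

with torch.no_grad():
    out_ids = model.generate(input_ids, audio=[audio], max_new_tokens=128)
print(model.tokenizer.decode(out_ids[0], skip_special_tokens=True))
```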
modeling_whisper.py ADDED
@@ -0,0 +1,200 @@
1
+ import base64
2
+ import gzip
3
+ from contextlib import contextmanager
4
+ from dataclasses import dataclass
5
+ from typing import Dict, Iterable, Optional, Tuple
6
+
7
+ import numpy as np
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from torch import Tensor, nn
11
+
12
+ try:
13
+ from torch.nn.functional import scaled_dot_product_attention
14
+
15
+ SDPA_AVAILABLE = True
16
+ except (ImportError, RuntimeError, OSError):
17
+ scaled_dot_product_attention = None
18
+ SDPA_AVAILABLE = False
19
+
20
+
21
+ @dataclass
22
+ class ModelDimensions:
23
+ n_mels: int
24
+ n_audio_ctx: int
25
+ n_audio_state: int
26
+ n_audio_head: int
27
+ n_audio_layer: int
28
+ n_vocab: int
29
+ n_text_ctx: int
30
+ n_text_state: int
31
+ n_text_head: int
32
+ n_text_layer: int
33
+
34
+
35
+ class LayerNorm(nn.LayerNorm):
36
+ def forward(self, x: Tensor) -> Tensor:
37
+ return super().forward(x.float()).type(x.dtype)
38
+
39
+
40
+ class Linear(nn.Linear):
41
+ def forward(self, x: Tensor) -> Tensor:
42
+ return F.linear(
43
+ x,
44
+ self.weight.to(x.dtype),
45
+ None if self.bias is None else self.bias.to(x.dtype),
46
+ )
47
+
48
+
49
+ class Conv1d(nn.Conv1d):
50
+ def _conv_forward(
51
+ self, x: Tensor, weight: Tensor, bias: Optional[Tensor]
52
+ ) -> Tensor:
53
+ return super()._conv_forward(
54
+ x, weight.to(x.dtype), None if bias is None else bias.to(x.dtype)
55
+ )
56
+
57
+
58
+ def sinusoids(length, channels, max_timescale=10000):
59
+ """Returns sinusoids for positional embedding"""
60
+ assert channels % 2 == 0
61
+ log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
62
+ inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
63
+ scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
64
+ return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
65
+
66
+
67
+ @contextmanager
68
+ def disable_sdpa():
69
+ prev_state = MultiHeadAttention.use_sdpa
70
+ try:
71
+ MultiHeadAttention.use_sdpa = False
72
+ yield
73
+ finally:
74
+ MultiHeadAttention.use_sdpa = prev_state
75
+
76
+
77
+ class MultiHeadAttention(nn.Module):
78
+ use_sdpa = True
79
+
80
+ def __init__(self, n_state: int, n_head: int):
81
+ super().__init__()
82
+ self.n_head = n_head
83
+ self.query = Linear(n_state, n_state)
84
+ self.key = Linear(n_state, n_state, bias=False)
85
+ self.value = Linear(n_state, n_state)
86
+ self.out = Linear(n_state, n_state)
87
+
88
+ def forward(
89
+ self,
90
+ x: Tensor,
91
+ xa: Optional[Tensor] = None,
92
+ mask: Optional[Tensor] = None,
93
+ kv_cache: Optional[dict] = None,
94
+ ):
95
+ q = self.query(x)
96
+
97
+ if kv_cache is None or xa is None or self.key not in kv_cache:
98
+ # hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors;
99
+ # otherwise, perform key/value projections for self- or cross-attention as usual.
100
+ k = self.key(x if xa is None else xa)
101
+ v = self.value(x if xa is None else xa)
102
+ else:
103
+ # for cross-attention, calculate keys and values once and reuse in subsequent calls.
104
+ k = kv_cache[self.key]
105
+ v = kv_cache[self.value]
106
+
107
+ wv, qk = self.qkv_attention(q, k, v, mask)
108
+ return self.out(wv), qk
109
+
110
+ def qkv_attention(
111
+ self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None
112
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
113
+ n_batch, n_ctx, n_state = q.shape
114
+ scale = (n_state // self.n_head) ** -0.25
115
+ q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
116
+ k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
117
+ v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
118
+
119
+ if SDPA_AVAILABLE and MultiHeadAttention.use_sdpa:
120
+ a = scaled_dot_product_attention(
121
+ q, k, v, is_causal=mask is not None and n_ctx > 1
122
+ )
123
+ out = a.permute(0, 2, 1, 3).flatten(start_dim=2)
124
+ qk = None
125
+ else:
126
+ qk = (q * scale) @ (k * scale).transpose(-1, -2)
127
+ if mask is not None:
128
+ qk = qk + mask[:n_ctx, :n_ctx]
129
+ qk = qk.float()
130
+
131
+ w = F.softmax(qk, dim=-1).to(q.dtype)
132
+ out = (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2)
133
+ qk = qk.detach()
134
+
135
+ return out, qk
136
+
137
+
138
+ class ResidualAttentionBlock(nn.Module):
139
+ def __init__(self, n_state: int, n_head: int, cross_attention: bool = False):
140
+ super().__init__()
141
+
142
+ self.attn = MultiHeadAttention(n_state, n_head)
143
+ self.attn_ln = LayerNorm(n_state)
144
+
145
+ self.cross_attn = (
146
+ MultiHeadAttention(n_state, n_head) if cross_attention else None
147
+ )
148
+ self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
149
+
150
+ n_mlp = n_state * 4
151
+ self.mlp = nn.Sequential(
152
+ Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state)
153
+ )
154
+ self.mlp_ln = LayerNorm(n_state)
155
+
156
+ def forward(
157
+ self,
158
+ x: Tensor,
159
+ xa: Optional[Tensor] = None,
160
+ mask: Optional[Tensor] = None,
161
+ kv_cache: Optional[dict] = None,
162
+ ):
163
+ x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)[0]
164
+ if self.cross_attn:
165
+ x = x + self.cross_attn(self.cross_attn_ln(x), xa, kv_cache=kv_cache)[0]
166
+ x = x + self.mlp(self.mlp_ln(x))
167
+ return x
168
+
169
+
170
+ class AudioEncoder(nn.Module):
171
+ def __init__(
172
+ self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int
173
+ ):
174
+ super().__init__()
175
+ self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1)
176
+ self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
177
+ self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state))
178
+
179
+ self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
180
+ [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
181
+ )
182
+ self.ln_post = LayerNorm(n_state)
183
+
184
+ def forward(self, x: Tensor):
185
+ """
186
+ x : torch.Tensor, shape = (batch_size, n_mels, n_ctx)
187
+ the mel spectrogram of the audio
188
+ """
189
+ x = F.gelu(self.conv1(x))
190
+ x = F.gelu(self.conv2(x))
191
+ x = x.permute(0, 2, 1)
192
+
193
+ assert x.shape[1:] == self.positional_embedding.shape, "incorrect audio shape"
194
+ x = (x + self.positional_embedding).to(x.dtype)
195
+
196
+ for block in self.blocks:
197
+ x = block(x)
198
+
199
+ x = self.ln_post(x)
200
+ return x
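A short shape check for this encoder, using the sizes from `encoder_config` in config.json (the random mel tensor and random weights are purely illustrative; it assumes `modeling_whisper.py` is importable):

```python
import torch
from modeling_whisper import AudioEncoder

# Whisper-large-v3-sized encoder, matching encoder_config in config.json.
encoder = AudioEncoder(n_mels=128, n_ctx=1500, n_state=1280, n_head=20, n_layer=32)

# Input: (batch, n_mels, 2 * n_ctx) log-mel frames; the stride-2 conv halves the time axis.
mel = torch.randn(1, 128, 3000)
with torch.no_grad():
    feats = encoder(mel)
print(feats.shape)  # torch.Size([1, 1500, 1280])
```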
special_tokens_map.json ADDED
@@ -0,0 +1,81 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "<|AUDIO|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ {
11
+ "content": "<|audio_bos|>",
12
+ "lstrip": false,
13
+ "normalized": false,
14
+ "rstrip": false,
15
+ "single_word": false
16
+ },
17
+ {
18
+ "content": "<|audio_eos|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ {
25
+ "content": "<|EN|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ },
31
+ {
32
+ "content": "<|KO|>",
33
+ "lstrip": false,
34
+ "normalized": false,
35
+ "rstrip": false,
36
+ "single_word": false
37
+ },
38
+ {
39
+ "content": "<|ASR|>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false
44
+ },
45
+ {
46
+ "content": "<|AST|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false
51
+ },
52
+ {
53
+ "content": "<|SSUM|>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false
58
+ },
59
+ {
60
+ "content": "<|SQQA|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false
65
+ }
66
+ ],
67
+ "eos_token": {
68
+ "content": "<|im_end|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false
73
+ },
74
+ "pad_token": {
75
+ "content": "<|endoftext|>",
76
+ "lstrip": false,
77
+ "normalized": false,
78
+ "rstrip": false,
79
+ "single_word": false
80
+ }
81
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,308 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ },
213
+ "151669": {
214
+ "content": "<|AUDIO|>",
215
+ "lstrip": false,
216
+ "normalized": false,
217
+ "rstrip": false,
218
+ "single_word": false,
219
+ "special": true
220
+ },
221
+ "151670": {
222
+ "content": "<|audio_bos|>",
223
+ "lstrip": false,
224
+ "normalized": false,
225
+ "rstrip": false,
226
+ "single_word": false,
227
+ "special": true
228
+ },
229
+ "151671": {
230
+ "content": "<|audio_eos|>",
231
+ "lstrip": false,
232
+ "normalized": false,
233
+ "rstrip": false,
234
+ "single_word": false,
235
+ "special": true
236
+ },
237
+ "151672": {
238
+ "content": "<|EN|>",
239
+ "lstrip": false,
240
+ "normalized": false,
241
+ "rstrip": false,
242
+ "single_word": false,
243
+ "special": true
244
+ },
245
+ "151673": {
246
+ "content": "<|KO|>",
247
+ "lstrip": false,
248
+ "normalized": false,
249
+ "rstrip": false,
250
+ "single_word": false,
251
+ "special": true
252
+ },
253
+ "151674": {
254
+ "content": "<|ASR|>",
255
+ "lstrip": false,
256
+ "normalized": false,
257
+ "rstrip": false,
258
+ "single_word": false,
259
+ "special": true
260
+ },
261
+ "151675": {
262
+ "content": "<|AST|>",
263
+ "lstrip": false,
264
+ "normalized": false,
265
+ "rstrip": false,
266
+ "single_word": false,
267
+ "special": true
268
+ },
269
+ "151676": {
270
+ "content": "<|SSUM|>",
271
+ "lstrip": false,
272
+ "normalized": false,
273
+ "rstrip": false,
274
+ "single_word": false,
275
+ "special": true
276
+ },
277
+ "151677": {
278
+ "content": "<|SQQA|>",
279
+ "lstrip": false,
280
+ "normalized": false,
281
+ "rstrip": false,
282
+ "single_word": false,
283
+ "special": true
284
+ }
285
+ },
286
+ "additional_special_tokens": [
287
+ "<|AUDIO|>",
288
+ "<|audio_bos|>",
289
+ "<|audio_eos|>",
290
+ "<|EN|>",
291
+ "<|KO|>",
292
+ "<|ASR|>",
293
+ "<|AST|>",
294
+ "<|SSUM|>",
295
+ "<|SQQA|>"
296
+ ],
297
+ "bos_token": null,
298
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n {%- if 
enable_thinking is defined and enable_thinking is false %}\n {{- '<think>\\n\\n</think>\\n\\n' }}\n {%- endif %}\n{%- endif %}",
299
+ "clean_up_tokenization_spaces": false,
300
+ "eos_token": "<|im_end|>",
301
+ "errors": "replace",
302
+ "extra_special_tokens": {},
303
+ "model_max_length": 131072,
304
+ "pad_token": "<|endoftext|>",
305
+ "split_special_tokens": false,
306
+ "tokenizer_class": "Qwen2Tokenizer",
307
+ "unk_token": null
308
+ }
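A hedged sketch of building a FastALM prompt with this tokenizer config (the message content, with the audio placeholder tokens, is an assumption consistent with modeling_fastalm.py rather than something documented here; the local path is illustrative):

```python
from transformers import AutoTokenizer

# Loads the Qwen2Tokenizer defined above, including the added audio/task/language tokens.
tok = AutoTokenizer.from_pretrained("path/to/hf_model")

messages = [
    {"role": "user", "content": "<|ASR|><|EN|><|audio_bos|><|AUDIO|><|audio_eos|>"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)                                   # <|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n
print(tok.convert_tokens_to_ids("<|AUDIO|>"))   # 151669, matching added_tokens.json
```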
vocab.json ADDED
The diff for this file is too large to render. See raw diff