niobures committed
Commit 9ae7b3a · verified · 1 Parent(s): 03a4320

Step-Audio (models)

Files changed (27)
  1. .gitattributes +2 -0
  2. models/Step-Audio-2-mini-Think/.gitattributes +37 -0
  3. models/Step-Audio-2-mini-Think/README.md +869 -0
  4. models/Step-Audio-2-mini-Think/added_tokens.json +0 -0
  5. models/Step-Audio-2-mini-Think/assets/architecture5.png +3 -0
  6. models/Step-Audio-2-mini-Think/assets/arxiv.svg +1 -0
  7. models/Step-Audio-2-mini-Think/assets/logo.png +0 -0
  8. models/Step-Audio-2-mini-Think/assets/qrcode.jpg +0 -0
  9. models/Step-Audio-2-mini-Think/assets/radar.png +3 -0
  10. models/Step-Audio-2-mini-Think/assets/wechat_group.png +0 -0
  11. models/Step-Audio-2-mini-Think/config.json +38 -0
  12. models/Step-Audio-2-mini-Think/configuration_step_audio_2.py +128 -0
  13. models/Step-Audio-2-mini-Think/merges.txt +0 -0
  14. models/Step-Audio-2-mini-Think/model-00001.safetensors +3 -0
  15. models/Step-Audio-2-mini-Think/model-00002.safetensors +3 -0
  16. models/Step-Audio-2-mini-Think/model.safetensors.index.json +1 -0
  17. models/Step-Audio-2-mini-Think/modeling_step_audio_2.py +425 -0
  18. models/Step-Audio-2-mini-Think/source.txt +1 -0
  19. models/Step-Audio-2-mini-Think/special_tokens_map.json +235 -0
  20. models/Step-Audio-2-mini-Think/token2wav/campplus.onnx +3 -0
  21. models/Step-Audio-2-mini-Think/token2wav/flow.pt +3 -0
  22. models/Step-Audio-2-mini-Think/token2wav/flow.yaml +34 -0
  23. models/Step-Audio-2-mini-Think/token2wav/hift.pt +3 -0
  24. models/Step-Audio-2-mini-Think/token2wav/speech_tokenizer_v2_25hz.onnx +3 -0
  25. models/Step-Audio-2-mini-Think/tokenizer.json +0 -0
  26. models/Step-Audio-2-mini-Think/tokenizer_config.json +0 -0
  27. models/Step-Audio-2-mini-Think/vocab.json +0 -0
.gitattributes CHANGED
@@ -44,3 +44,5 @@ models/Step-Audio-2-mini-Base/assets/radar.png filter=lfs diff=lfs merge=lfs -text
  models/Step-Audio-2-mini-fork/assets/architecture5.png filter=lfs diff=lfs merge=lfs -text
  models/Step-Audio-2-mini-fork/assets/radar.png filter=lfs diff=lfs merge=lfs -text
  models/Step-Audio-2-mini-fork/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ models/Step-Audio-2-mini-Think/assets/architecture5.png filter=lfs diff=lfs merge=lfs -text
+ models/Step-Audio-2-mini-Think/assets/radar.png filter=lfs diff=lfs merge=lfs -text
models/Step-Audio-2-mini-Think/.gitattributes ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/architecture5.png filter=lfs diff=lfs merge=lfs -text
+ assets/radar.png filter=lfs diff=lfs merge=lfs -text
models/Step-Audio-2-mini-Think/README.md ADDED
@@ -0,0 +1,869 @@
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+
5
+ <div align="center">
6
+ <img src="assets/logo.png" height=100>
7
+ </div>
8
+
9
+ <div align="center" style="line-height: 1;">
10
+ <a href="https://github.com/stepfun-ai/Step-Audio2" target="_blank"><img alt="GitHub" src="https://img.shields.io/badge/GitHub-StepFun-white?logo=github&logoColor=white"/></a> &ensp;
11
+ <a href="https://stepfun.com/" target="_blank"><img alt="Homepage" src="https://img.shields.io/badge/Homepage-StepFun-white?logo=StepFun&logoColor=white"/></a> &ensp;
12
+ <a href="https://x.com/StepFun_ai" target="_blank"><img alt="Twitter Follow" src="https://img.shields.io/badge/Twitter-StepFun-white?logo=x&logoColor=white"/></a> &ensp;
13
+ <a href="https://discord.com/invite/XHheP5Fn" target="_blank"><img alt="Discord" src="https://img.shields.io/badge/Discord-StepFun-white?logo=discord&logoColor=white"/></a>
14
+ </div>
15
+ <div align="center">
16
+ <a href="https://huggingface.co/stepfun-ai/Step-Audio-2-mini"><img src="https://img.shields.io/static/v1?label=Step-Audio-2-mini&message=HuggingFace&color=yellow"></a> &ensp;
17
+ <a href="https://huggingface.co/stepfun-ai/Step-Audio-2-mini-Base"><img src="https://img.shields.io/static/v1?label=Step-Audio-2-mini-Base&message=HuggingFace&color=yellow"></a>
18
+ </div>
19
+ <div align="center">
20
+ <a href="https://arxiv.org/abs/2507.16632"><img src="assets/arxiv.svg"></a> &ensp;
21
+ <a href="https://github.com/stepfun-ai/Step-Audio2/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/badge/License-Apache%202.0-blue?&color=blue"/></a>
22
+ </div>
23
+
24
+ ## Introduction
25
+
26
+
27
+ Step-Audio 2 is an end-to-end multi-modal large language model designed for industry-strength audio understanding and speech conversation.
28
+
29
+ - **Advanced Speech and Audio Understanding**: Promising performance in ASR and audio understanding, achieved by comprehending and reasoning over semantic, paralinguistic, and non-vocal information.
30
+
31
+ - **Intelligent Speech Conversation**: Natural and intelligent interactions that are contextually appropriate to various conversational scenarios and to the speaker's paralinguistic cues.
32
+
33
+ - **Tool Calling and Multimodal RAG**: By leveraging tool calling and RAG to access real-world knowledge (both textual and acoustic), Step-Audio 2 can generate responses with fewer hallucinations for diverse scenarios, while also having the ability to switch timbres based on retrieved speech.
34
+
35
+ - **State-of-the-Art Performance**: State-of-the-art results on various audio understanding and conversational benchmarks compared with other open-source and commercial solutions (see [Evaluation](#evaluation) and the [Technical Report](https://arxiv.org/pdf/2507.16632)).
36
+
37
+ - **Open-source**: [Step-Audio 2 mini](https://huggingface.co/stepfun-ai/Step-Audio-2-mini) and [Step-Audio 2 mini Base](https://huggingface.co/stepfun-ai/Step-Audio-2-mini-Base) are released under the [Apache 2.0](LICENSE) license.
38
+
39
+ ## Model Download
40
+ ### Huggingface
41
+ | Models | 🤗 Hugging Face |
42
+ |-------|-------|
43
+ | Step-Audio 2 mini | [stepfun-ai/Step-Audio-2-mini](https://huggingface.co/stepfun-ai/Step-Audio-2-mini) |
44
+ | Step-Audio 2 mini Base | [stepfun-ai/Step-Audio-2-mini-Base](https://huggingface.co/stepfun-ai/Step-Audio-2-mini-Base) |
45
+
46
+ <!-- ### Modelscope
47
+ | Models | Links |
48
+ |-------|-------|
49
+ | Step-Audio-2-mini | [modelscope](https://modelscope.cn/models/stepfun-ai/Step-Audio-2-mini) |
50
+ | Step-Audio-2-mini-Base | [modelscope](https://modelscope.cn/models/stepfun-ai/Step-Audio-2-mini-Base) | -->
51
+
52
+ ## Model Usage
53
+ ### 🔧 Dependencies and Installation
54
+ - Python >= 3.10
55
+ - [PyTorch >= 2.3-cu121](https://pytorch.org/)
56
+ - [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads)
57
+
58
+ ```bash
59
+ conda create -n stepaudio2 python=3.10
60
+ conda activate stepaudio2
61
+ pip install transformers==4.49.0 torchaudio librosa onnxruntime s3tokenizer diffusers hyperpyyaml
62
+
63
+ git clone https://github.com/stepfun-ai/Step-Audio2.git
64
+ cd Step-Audio2
65
+ git lfs install
66
+ git clone https://huggingface.co/stepfun-ai/Step-Audio-2-mini
67
+ ```
68
+
69
+ ### 🚀 Inference Scripts
70
+
71
+ ```bash
72
+ python examples.py
73
+ ```
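
The repository's `examples.py` is the reference entry point for end-to-end audio inference. As a rough orientation only, the sketch below shows how a checkpoint like this one can be loaded through the `auto_map` entries declared in its `config.json`; the local path and dtype choice are assumptions based on the clone step and configuration above, not an official snippet.

```python
# Illustrative loading sketch (not the official example; see examples.py in the repo).
# Assumes the "Step-Audio-2-mini" directory cloned above and a CUDA-capable GPU.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Step-Audio-2-mini",
    torch_dtype=torch.bfloat16,   # matches the torch_dtype declared in config.json
    trust_remote_code=True,       # resolves the custom StepAudio2ForCausalLM via auto_map
).eval().to("cuda")

print(model.config.model_type)    # expected: "step_audio_2"
```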
74
+
75
+ ### 🚀 Local web demonstration
76
+
77
+ ```bash
78
+ pip install gradio
79
+ python web_demo.py
80
+ ```
81
+
82
+
83
+ ## Online demonstration
84
+
85
+ ### StepFun realtime console
86
+
87
+ - Both Step-Audio 2 and Step-Audio 2 mini are available in our [StepFun realtime console](https://realtime-console.stepfun.com/) with the web search tool enabled.
88
+ - You will need an API key from the [StepFun Open Platform](https://platform.stepfun.com/).
89
+
90
+ ### StepFun AI Assistant
91
+
92
+ - Step-Audio 2 is also available in our StepFun AI Assistant mobile app, with both web and audio search tools enabled.
93
+ - Please scan the following QR code to download it from your app store, then tap the phone icon in the top-right corner.
94
+
95
+ <div align="center">
96
+ <img src="./assets/qrcode.jpg" width="200" alt="QR code">
97
+ </div>
98
+
99
+ ## WeChat group
100
+
101
+ You can scan the following QR code to join our WeChat group for communication and discussion.
102
+ <div align="center">
103
+ <img src="./assets/wechat_group.png" width="200" alt="QR code">
104
+ </div>
105
+
106
+ ## Evaluation
107
+ <div align="center">
108
+ <img src="assets/radar.png" alt="Architecture" width="600" />
109
+ </div>
110
+
111
+ ### Automatic speech recognition
112
+ CER is reported for Chinese, Cantonese, and Japanese, and WER for Arabic and English. N/A indicates that the language is not supported.
113
+
114
+ <table border="1" cellpadding="5" cellspacing="0" align="center">
115
+ <thead>
116
+ <tr>
117
+ <th style="text-align: center;">Category</th>
118
+ <th style="text-align: center;">Test set</th>
119
+ <th style="text-align: center;">Doubao LLM ASR</th>
120
+ <th style="text-align: center;">GPT-4o Transcribe</th>
121
+ <th style="text-align: center;">Kimi-Audio</th>
122
+ <th style="text-align: center;">Qwen-Omni</th>
123
+ <th style="text-align: center;">Step-Audio 2</th>
124
+ <th style="text-align: center;">Step-Audio 2 mini</th>
125
+ </tr>
126
+ </thead>
127
+ <tbody>
128
+ <tr>
129
+ <td rowspan="5" style="text-align: center; vertical-align: middle;"><strong>English</strong></td>
130
+ <td align="left">Common Voice</td>
131
+ <td align="center">9.20</td>
132
+ <td align="center">9.30</td>
133
+ <td align="center">7.83</td>
134
+ <td align="center">8.33</td>
135
+ <td align="center"><strong>5.95</strong></td>
136
+ <td align="center">6.76</td>
137
+ </tr>
138
+ <tr>
139
+ <td align="left">FLEURS English</td>
140
+ <td align="center">7.22</td>
141
+ <td align="center"><strong>2.71</strong></td>
142
+ <td align="center">4.47</td>
143
+ <td align="center">5.05</td>
144
+ <td align="center">3.03</td>
145
+ <td align="center">3.05</td>
146
+ </tr>
147
+ <tr>
148
+ <td align="left">LibriSpeech clean</td>
149
+ <td align="center">2.92</td>
150
+ <td align="center">1.75</td>
151
+ <td align="center">1.49</td>
152
+ <td align="center">2.93</td>
153
+ <td align="center"><strong>1.17</strong></td>
154
+ <td align="center">1.33</td>
155
+ </tr>
156
+ <tr>
157
+ <td align="left">LibriSpeech other</td>
158
+ <td align="center">5.32</td>
159
+ <td align="center">4.23</td>
160
+ <td align="center">2.91</td>
161
+ <td align="center">5.07</td>
162
+ <td align="center"><strong>2.42</strong></td>
163
+ <td align="center">2.86</td>
164
+ </tr>
165
+ <tr>
166
+ <td align="left"><strong>Average</strong></td>
167
+ <td align="center">6.17</td>
168
+ <td align="center">4.50</td>
169
+ <td align="center">4.18</td>
170
+ <td align="center">5.35</td>
171
+ <td align="center"><strong>3.14</strong></td>
172
+ <td align="center">3.50</td>
173
+ </tr>
174
+ <tr>
175
+ <td rowspan="7" style="text-align: center; vertical-align: middle;"><strong>Chinese</strong></td>
176
+ <td align="left">AISHELL</td>
177
+ <td align="center">0.98</td>
178
+ <td align="center">3.52</td>
179
+ <td align="center">0.64</td>
180
+ <td align="center">1.17</td>
181
+ <td align="center"><strong>0.63</strong></td>
182
+ <td align="center">0.78</td>
183
+ </tr>
184
+ <tr>
185
+ <td align="left">AISHELL-2</td>
186
+ <td align="center">3.10</td>
187
+ <td align="center">4.26</td>
188
+ <td align="center">2.67</td>
189
+ <td align="center">2.40</td>
190
+ <td align="center"><strong>2.10</strong></td>
191
+ <td align="center">2.16</td>
192
+ </tr>
193
+ <tr>
194
+ <td align="left">FLEURS Chinese</td>
195
+ <td align="center">2.92</td>
196
+ <td align="center">2.62</td>
197
+ <td align="center">2.91</td>
198
+ <td align="center">7.01</td>
199
+ <td align="center">2.68</td>
200
+ <td align="center"><strong>2.53</strong></td>
201
+ </tr>
202
+ <tr>
203
+ <td align="left">KeSpeech phase1</td>
204
+ <td align="center">6.48</td>
205
+ <td align="center">26.80</td>
206
+ <td align="center">5.11</td>
207
+ <td align="center">6.45</td>
208
+ <td align="center"><strong>3.63</strong></td>
209
+ <td align="center">3.97</td>
210
+ </tr>
211
+ <tr>
212
+ <td align="left">WenetSpeech meeting</td>
213
+ <td align="center">4.90</td>
214
+ <td align="center">31.40</td>
215
+ <td align="center">5.21</td>
216
+ <td align="center">6.61</td>
217
+ <td align="center"><strong>4.75</strong></td>
218
+ <td align="center">4.87</td>
219
+ </tr>
220
+ <tr>
221
+ <td align="left">WenetSpeech net</td>
222
+ <td align="center"><strong>4.46</strong></td>
223
+ <td align="center">15.71</td>
224
+ <td align="center">5.93</td>
225
+ <td align="center">5.24</td>
226
+ <td align="center">4.67</td>
227
+ <td align="center">4.82</td>
228
+ </tr>
229
+ <tr>
230
+ <td align="left"><strong>Average</strong></td>
231
+ <td align="center">3.81</td>
232
+ <td align="center">14.05</td>
233
+ <td align="center">3.75</td>
234
+ <td align="center">4.81</td>
235
+ <td align="center"><strong>3.08</strong></td>
236
+ <td align="center">3.19</td>
237
+ </tr>
238
+ <tr>
239
+ <td rowspan="3" style="text-align: center; vertical-align: middle;"><strong>Multilingual </strong></td>
240
+ <td align="left">FLEURS Arabian</td>
241
+ <td align="center">N/A</td>
242
+ <td align="center"><strong>11.72</strong></td>
243
+ <td align="center">N/A</td>
244
+ <td align="center">25.13</td>
245
+ <td align="center">14.22</td>
246
+ <td align="center">16.46</td>
247
+ </tr>
248
+ <tr>
249
+ <td align="left">Common Voice yue</td>
250
+ <td align="center">9.20</td>
251
+ <td align="center">11.10</td>
252
+ <td align="center">38.90</td>
253
+ <td align="center"><strong>7.89</strong></td>
254
+ <td align="center">7.90</td>
255
+ <td align="center">8.32</td>
256
+ </tr>
257
+ <tr>
258
+ <td align="left">FLEURS Japanese</td>
259
+ <td align="center">N/A</td>
260
+ <td align="center"><strong>3.27</strong></td>
261
+ <td align="center">N/A</td>
262
+ <td align="center">10.49</td>
263
+ <td align="center">3.18</td>
264
+ <td align="center">4.67</td>
265
+ </tr>
266
+ <tr>
267
+ <td rowspan="7" style="text-align: center; vertical-align: middle;"><strong>In-house</strong></td>
268
+ <td align="left">Anhui accent</td>
269
+ <td align="center"><strong>8.83</strong></td>
270
+ <td align="center">50.55</td>
271
+ <td align="center">22.17</td>
272
+ <td align="center">18.73</td>
273
+ <td align="center">10.61</td>
274
+ <td align="center">11.65</td>
275
+ </tr>
276
+ <tr>
277
+ <td align="left">Guangdong accent</td>
278
+ <td align="center">4.99</td>
279
+ <td align="center">7.83</td>
280
+ <td align="center"><strong>3.76</strong></td>
281
+ <td align="center">4.03</td>
282
+ <td align="center">3.81</td>
283
+ <td align="center">4.44</td>
284
+ </tr>
285
+ <tr>
286
+ <td align="left">Guangxi accent</td>
287
+ <td align="center">3.37</td>
288
+ <td align="center">7.09</td>
289
+ <td align="center">4.29</td>
290
+ <td align="center"><strong>3.35</strong></td>
291
+ <td align="center">4.11</td>
292
+ <td align="center">3.51</td>
293
+ </tr>
294
+ <tr>
295
+ <td align="left">Shanxi accent</td>
296
+ <td align="center">20.26</td>
297
+ <td align="center">55.03</td>
298
+ <td align="center">34.71</td>
299
+ <td align="center">25.95</td>
300
+ <td align="center"><strong>12.44</strong></td>
301
+ <td align="center">15.60</td>
302
+ </tr>
303
+ <tr>
304
+ <td align="left">Sichuan dialect</td>
305
+ <td align="center"><strong>3.01</strong></td>
306
+ <td align="center">32.85</td>
307
+ <td align="center">5.26</td>
308
+ <td align="center">5.61</td>
309
+ <td align="center">4.35</td>
310
+ <td align="center">4.57</td>
311
+ </tr>
312
+ <tr>
313
+ <td align="left">Shanghai dialect</td>
314
+ <td align="center">47.49</td>
315
+ <td align="center">89.58</td>
316
+ <td align="center">82.90</td>
317
+ <td align="center">58.74</td>
318
+ <td align="center"><strong>17.77</strong></td>
319
+ <td align="center">19.30</td>
320
+ </tr>
321
+ <tr>
322
+ <td align="left"><strong>Average</strong></td>
323
+ <td align="center">14.66</td>
324
+ <td align="center">40.49</td>
325
+ <td align="center">25.52</td>
326
+ <td align="center">19.40</td>
327
+ <td align="center"><strong>8.85</strong></td>
328
+ <td align="center">9.85</td>
329
+ </tr>
330
+ </tbody>
331
+ </table>
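
For reference, the character and word error rates reported above are edit-distance metrics: the minimum number of insertions, deletions, and substitutions needed to turn the hypothesis into the reference, divided by the reference length. The snippet below is a generic illustration of that computation, not the evaluation script behind the table.

```python
# Generic WER/CER illustration: Levenshtein distance normalized by reference length.
def edit_distance(ref, hyp):
    # Single-row dynamic programming over the (ref x hyp) alignment grid.
    row = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        prev, row[0] = row[0], i
        for j, h in enumerate(hyp, 1):
            prev, row[j] = row[j], min(row[j] + 1,        # deletion
                                       row[j - 1] + 1,    # insertion
                                       prev + (r != h))   # substitution / match
    return row[-1]

def wer(ref: str, hyp: str) -> float:
    return edit_distance(ref.split(), hyp.split()) / len(ref.split())

def cer(ref: str, hyp: str) -> float:
    return edit_distance(list(ref), list(hyp)) / len(ref)

print(round(100 * wer("the cat sat on the mat", "the cat sat on a mat"), 2))  # 16.67
```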
332
+
333
+ ### Paralinguistic information understanding
334
+ StepEval-Audio-Paralinguistic
335
+ <table border="1" cellpadding="5" cellspacing="0" align="center">
336
+ <thead>
337
+ <tr>
338
+ <th style="text-align: center;" rowspan="2">Model</th>
339
+ <th style="text-align: center;" rowspan="2">Avg.</th>
340
+ <th style="text-align: center;" rowspan="2">Gender</th>
341
+ <th style="text-align: center;" rowspan="2">Age</th>
342
+ <th style="text-align: center;" rowspan="2">Timbre</th>
343
+ <th style="text-align: center;" rowspan="2">Scenario</th>
344
+ <th style="text-align: center;" rowspan="2">Event</th>
345
+ <th style="text-align: center;" rowspan="2">Emotion</th>
346
+ <th style="text-align: center;" rowspan="2">Pitch</th>
347
+ <th style="text-align: center;" rowspan="2">Rhythm</th>
348
+ <th style="text-align: center;" rowspan="2">Speed</th>
349
+ <th style="text-align: center;" rowspan="2">Style</th>
350
+ <th style="text-align: center;" rowspan="2">Vocal</th>
351
+ </tr>
352
+ </thead>
353
+ <tbody>
354
+ <tr>
355
+ <td align="left"><strong>GPT-4o Audio</strong></td>
356
+ <td align="center">43.45</td>
357
+ <td align="center">18</td>
358
+ <td align="center">42</td>
359
+ <td align="center">34</td>
360
+ <td align="center">22</td>
361
+ <td align="center">14</td>
362
+ <td align="center">82</td>
363
+ <td align="center">40</td>
364
+ <td align="center">60</td>
365
+ <td align="center">58</td>
366
+ <td align="center">64</td>
367
+ <td align="center">44</td>
368
+ </tr>
369
+ <tr>
370
+ <td align="left"><strong>Kimi-Audio</strong></td>
371
+ <td align="center">49.64</td>
372
+ <td align="center">94</td>
373
+ <td align="center">50</td>
374
+ <td align="center">10</td>
375
+ <td align="center">30</td>
376
+ <td align="center">48</td>
377
+ <td align="center">66</td>
378
+ <td align="center">56</td>
379
+ <td align="center">40</td>
380
+ <td align="center">44</td>
381
+ <td align="center">54</td>
382
+ <td align="center">54</td>
383
+ </tr>
384
+ <tr>
385
+ <td align="left"><strong>Qwen-Omni</strong></td>
386
+ <td align="center">44.18</td>
387
+ <td align="center">40</td>
388
+ <td align="center">50</td>
389
+ <td align="center">16</td>
390
+ <td align="center">28</td>
391
+ <td align="center">42</td>
392
+ <td align="center">76</td>
393
+ <td align="center">32</td>
394
+ <td align="center">54</td>
395
+ <td align="center">50</td>
396
+ <td align="center">50</td>
397
+ <td align="center">48</td>
398
+ </tr>
399
+ <tr>
400
+ <td align="left"><strong>Step-Audio-AQAA</strong></td>
401
+ <td align="center">36.91</td>
402
+ <td align="center">70</td>
403
+ <td align="center">66</td>
404
+ <td align="center">18</td>
405
+ <td align="center">14</td>
406
+ <td align="center">14</td>
407
+ <td align="center">40</td>
408
+ <td align="center">38</td>
409
+ <td align="center">48</td>
410
+ <td align="center">54</td>
411
+ <td align="center">44</td>
412
+ <td align="center">0</td>
413
+ </tr>
414
+ <tr>
415
+ <td align="left"><strong>Step-Audio 2</strong></td>
416
+ <td align="center"><strong>83.09</strong></td>
417
+ <td align="center"><strong>100</strong></td>
418
+ <td align="center"><strong>96</strong></td>
419
+ <td align="center"><strong>82</strong></td>
420
+ <td align="center"><strong>78</strong></td>
421
+ <td align="center"><strong>60</strong></td>
422
+ <td align="center"><strong>86</strong></td>
423
+ <td align="center"><strong>82</strong></td>
424
+ <td align="center"><strong>86</strong></td>
425
+ <td align="center"><strong>88</strong></td>
426
+ <td align="center"><strong>88</strong></td>
427
+ <td align="center">68</td>
428
+ </tr>
429
+ <tr>
430
+ <td align="left"><strong>Step-Audio 2 mini</strong></td>
431
+ <td align="center">80.00</td>
432
+ <td align="center"><strong>100</strong></td>
433
+ <td align="center">94</td>
434
+ <td align="center">80</td>
435
+ <td align="center"><strong>78</strong></td>
436
+ <td align="center"><strong>60</strong></td>
437
+ <td align="center">82</td>
438
+ <td align="center"><strong>82</strong></td>
439
+ <td align="center">68</td>
440
+ <td align="center">74</td>
441
+ <td align="center">86</td>
442
+ <td align="center"><strong>76</strong></td>
443
+ </tr>
444
+ </tbody>
445
+ </table>
446
+
447
+ ### Audio understanding and reasoning
448
+ MMAU
449
+ <table border="1" cellpadding="5" cellspacing="0" align="center">
450
+ <thead>
451
+ <tr>
452
+ <th style="text-align: center;">Model</th>
453
+ <th style="text-align: center;">Avg.</th>
454
+ <th style="text-align: center;">Sound</th>
455
+ <th style="text-align: center;">Speech</th>
456
+ <th style="text-align: center;">Music</th>
457
+ </tr>
458
+ </thead>
459
+ <tbody>
460
+ <tr>
461
+ <td align="left"><strong>Audio Flamingo 3</strong></td>
462
+ <td align="center">73.1</td>
463
+ <td align="center">76.9</td>
464
+ <td align="center">66.1</td>
465
+ <td align="center"><strong>73.9</strong></td>
466
+ </tr>
467
+ <tr>
468
+ <td align="left"><strong>Gemini 2.5 Pro</strong></td>
469
+ <td align="center">71.6</td>
470
+ <td align="center">75.1</td>
471
+ <td align="center">71.5</td>
472
+ <td align="center">68.3</td>
473
+ </tr>
474
+ <tr>
475
+ <td align="left"><strong>GPT-4o Audio</strong></td>
476
+ <td align="center">58.1</td>
477
+ <td align="center">58.0</td>
478
+ <td align="center">64.6</td>
479
+ <td align="center">51.8</td>
480
+ </tr>
481
+ <tr>
482
+ <td align="left"><strong>Kimi-Audio</strong></td>
483
+ <td align="center">69.6</td>
484
+ <td align="center">79.0</td>
485
+ <td align="center">65.5</td>
486
+ <td align="center">64.4</td>
487
+ </tr>
488
+ <tr>
489
+ <td align="left"><strong>Omni-R1</strong></td>
490
+ <td align="center">77.0</td>
491
+ <td align="center">81.7</td>
492
+ <td align="center">76.0</td>
493
+ <td align="center">73.4</td>
494
+ </tr>
495
+ <tr>
496
+ <td align="left"><strong>Qwen2.5-Omni</strong></td>
497
+ <td align="center">71.5</td>
498
+ <td align="center">78.1</td>
499
+ <td align="center">70.6</td>
500
+ <td align="center">65.9</td>
501
+ </tr>
502
+ <tr>
503
+ <td align="left"><strong>Step-Audio-AQAA</strong></td>
504
+ <td align="center">49.7</td>
505
+ <td align="center">50.5</td>
506
+ <td align="center">51.4</td>
507
+ <td align="center">47.3</td>
508
+ </tr>
509
+ <tr>
510
+ <td align="left"><strong>Step-Audio 2</strong></td>
511
+ <td align="center"><strong>78.0</strong></td>
512
+ <td align="center"><strong>83.5</strong></td>
513
+ <td align="center"><strong>76.9</strong></td>
514
+ <td align="center">73.7</td>
515
+ </tr>
516
+ <tr>
517
+ <td align="left"><strong>Step-Audio 2 mini</strong></td>
518
+ <td align="center">73.2</td>
519
+ <td align="center">76.6</td>
520
+ <td align="center">71.5</td>
521
+ <td align="center">71.6</td>
522
+ </tr>
523
+ </tbody>
524
+ </table>
525
+
526
+ ### Speech translation
527
+
528
+ <table border="1" cellpadding="5" cellspacing="0" align="center">
529
+ <thead>
530
+ <tr>
531
+ <th style="text-align: center;" rowspan="2">Model</th>
532
+ <th style="text-align: center;" colspan="3">CoVoST 2 (S2TT)</th>
533
+ </tr>
534
+ <tr>
535
+ <th>Avg.</th>
536
+ <th>English-to-Chinese</th>
537
+ <th>Chinese-to-English</th>
538
+ </tr>
539
+ </thead>
540
+ <tbody>
541
+ <tr>
542
+ <td align="left"><strong>GPT-4o Audio</strong></td>
543
+ <td align="center">29.61</td>
544
+ <td align="center">40.20</td>
545
+ <td align="center">19.01</td>
546
+ </tr>
547
+ <tr>
548
+ <td align="left"><strong>Qwen2.5-Omni</strong></td>
549
+ <td align="center">35.40</td>
550
+ <td align="center">41.40</td>
551
+ <td align="center">29.40</td>
552
+ </tr>
553
+ <tr>
554
+ <td align="left"><strong>Step-Audio-AQAA</strong></td>
555
+ <td align="center">28.57</td>
556
+ <td align="center">37.71</td>
557
+ <td align="center">19.43</td>
558
+ </tr>
559
+ <tr>
560
+ <td align="left"><strong>Step-Audio 2</strong></td>
561
+ <td align="center">39.26</td>
562
+ <td align="center">49.01</td>
563
+ <td align="center"><strong>29.51</strong></td>
564
+ </tr>
565
+ <tr>
566
+ <td align="left"><strong>Step-Audio 2 mini</strong></td>
567
+ <td align="center"><strong>39.29</strong></td>
568
+ <td align="center"><strong>49.12</strong></td>
569
+ <td align="center">29.47</td>
570
+ </tr>
571
+ </tbody>
572
+ </table>
573
+
574
+ <table border="1" cellpadding="5" cellspacing="0" align="center">
575
+ <thead>
576
+ <tr>
577
+ <th style="text-align: center;" rowspan="2">Model</th>
578
+ <th style="text-align: center;" colspan="3">CVSS (S2ST)</th>
579
+ </tr>
580
+ <tr>
581
+ <th>Avg.</th>
582
+ <th>English-to-Chinese</th>
583
+ <th>Chinese-to-English</th>
584
+ </tr>
585
+ </thead>
586
+ <tbody>
587
+ <tr>
588
+ <td align="left"><strong>GPT-4o Audio</strong></td>
589
+ <td align="center">23.68</td>
590
+ <td align="center">20.07</td>
591
+ <td align="center"><strong>27.29</strong></td>
592
+ </tr>
593
+ <tr>
594
+ <td align="left"><strong>Qwen-Omni</strong></td>
595
+ <td align="center">15.35</td>
596
+ <td align="center">8.04</td>
597
+ <td align="center">22.66</td>
598
+ </tr>
599
+ <tr>
600
+ <td align="left"><strong>Step-Audio-AQAA</strong></td>
601
+ <td align="center">27.36</td>
602
+ <td align="center">30.74</td>
603
+ <td align="center">23.98</td>
604
+ </tr>
605
+ <tr>
606
+ <td align="left"><strong>Step-Audio 2</strong></td>
607
+ <td align="center"><strong>30.87</strong></td>
608
+ <td align="center"><strong>34.83</strong></td>
609
+ <td align="center">26.92</td>
610
+ </tr>
611
+ <tr>
612
+ <td align="left"><strong>Step-Audio 2 mini</strong></td>
613
+ <td align="center">29.08</td>
614
+ <td align="center">32.81</td>
615
+ <td align="center">25.35</td>
616
+ </tr>
617
+ </tbody>
618
+ </table>
619
+
620
+ ### Tool calling
621
+ StepEval-Audio-Toolcall. The date and time tool takes no parameters.
622
+ <table border="1" cellpadding="5" cellspacing="0" align="center">
623
+ <thead>
624
+ <tr>
625
+ <th style="text-align: center;">Model</th>
626
+ <th style="text-align: center;">Objective</th>
627
+ <th style="text-align: center;">Metric</th>
628
+ <th style="text-align: center;">Audio search</th>
629
+ <th style="text-align: center;">Date & Time</th>
630
+ <th style="text-align: center;">Weather</th>
631
+ <th style="text-align: center;">Web search</th>
632
+ </tr>
633
+ </thead>
634
+ <tbody>
635
+ <tr>
636
+ <td style="text-align: center; vertical-align: middle;" rowspan="3"><strong>Qwen3-32B</strong><sup>†</sup></td>
637
+ <td align="center"><strong>Trigger</strong></td>
638
+ <td align="center"><strong>Precision / Recall</strong></td>
639
+ <td align="center">67.5 / 98.5</td>
640
+ <td align="center">98.4 / 100.0</td>
641
+ <td align="center">90.1 / 100.0</td>
642
+ <td align="center">86.8 / 98.5</td>
643
+ </tr>
644
+ <tr>
645
+ <td align="center"><strong>Type</strong></td>
646
+ <td align="center"><strong>Accuracy</strong></td>
647
+ <td align="center">100.0</td>
648
+ <td align="center">100.0</td>
649
+ <td align="center">98.5</td>
650
+ <td align="center">98.5</td>
651
+ </tr>
652
+ <tr>
653
+ <td align="center"><strong>Parameter</strong></td>
654
+ <td align="center"><strong>Accuracy</strong></td>
655
+ <td align="center">100.0</td>
656
+ <td align="center">N/A</td>
657
+ <td align="center">100.0</td>
658
+ <td align="center">100.0</td>
659
+ </tr>
660
+ <tr>
661
+ <td style="text-align: center; vertical-align: middle;" rowspan="3"><strong>Step-Audio 2</strong></td>
662
+ <td align="center"><strong>Trigger</strong></td>
663
+ <td align="center"><strong>Precision / Recall</strong></td>
664
+ <td align="center">86.8 / 99.5</td>
665
+ <td align="center">96.9 / 98.4</td>
666
+ <td align="center">92.2 / 100.0</td>
667
+ <td align="center">88.4 / 95.5</td>
668
+ </tr>
669
+ <tr>
670
+ <td align="center"><strong>Type</strong></td>
671
+ <td align="center"><strong>Accuracy</strong></td>
672
+ <td align="center">100.0</td>
673
+ <td align="center">100.0</td>
674
+ <td align="center">90.5</td>
675
+ <td align="center">98.4</td>
676
+ </tr>
677
+ <tr>
678
+ <td align="center"><strong>Parameter</strong></td>
679
+ <td align="center"><strong>Accuracy</strong></td>
680
+ <td align="center">100.0</td>
681
+ <td align="center">N/A</td>
682
+ <td align="center">100.0</td>
683
+ <td align="center">100.0</td>
684
+ </tr>
685
+ </tbody>
686
+ </table>
687
+
688
+ ### Speech-to-speech conversation
689
+ URO-Bench. U., R., and O. stand for understanding, reasoning, and oral conversation, respectively.
690
+
691
+ <table border="1" cellpadding="5" cellspacing="0" align="center">
692
+ <thead>
693
+ <tr>
694
+ <th style="text-align: center;" rowspan="2">Model</th>
695
+ <th style="text-align: center;" rowspan="2">Language</th>
696
+ <th style="text-align: center;" colspan="4">Basic</th>
697
+ <th style="text-align: center;" colspan="4">Pro</th>
698
+ </tr>
699
+ <tr>
700
+ <th style="text-align: center;">Avg.</th>
701
+ <th style="text-align: center;">U.</th>
702
+ <th style="text-align: center;">R.</th>
703
+ <th style="text-align: center;">O.</th>
704
+ <th style="text-align: center;">Avg.</th>
705
+ <th style="text-align: center;">U.</th>
706
+ <th style="text-align: center;">R.</th>
707
+ <th style="text-align: center;">O.</th>
708
+ </tr>
709
+ </thead>
710
+ <tbody>
711
+ <tr>
712
+ <td align="left"><strong>GPT-4o Audio</strong></td>
713
+ <td rowspan="6" style="text-align: center; vertical-align: middle;"><strong>Chinese</strong></td>
714
+ <td align="center">78.59</td>
715
+ <td align="center">89.40</td>
716
+ <td align="center">65.48</td>
717
+ <td align="center">85.24</td>
718
+ <td align="center">67.10</td>
719
+ <td align="center">70.60</td>
720
+ <td align="center">57.22</td>
721
+ <td align="center">70.20</td>
722
+ </tr>
723
+ <tr>
724
+ <td align="left"><strong>Kimi-Audio</strong></td>
725
+ <td align="center">73.59</td>
726
+ <td align="center">79.34</td>
727
+ <td align="center">64.66</td>
728
+ <td align="center">79.75</td>
729
+ <td align="center">66.07</td>
730
+ <td align="center">60.44</td>
731
+ <td align="center">59.29</td>
732
+ <td align="center"><strong>76.21</strong></td>
733
+ </tr>
734
+ <tr>
735
+ <td align="left"><strong>Qwen-Omni</strong></td>
736
+ <td align="center">68.98</td>
737
+ <td align="center">59.66</td>
738
+ <td align="center">69.74</td>
739
+ <td align="center">77.27</td>
740
+ <td align="center">59.11</td>
741
+ <td align="center">59.01</td>
742
+ <td align="center">59.82</td>
743
+ <td align="center">58.74</td>
744
+ </tr>
745
+ <tr>
746
+ <td align="left"><strong>Step-Audio-AQAA</strong></td>
747
+ <td align="center">74.71</td>
748
+ <td align="center">87.61</td>
749
+ <td align="center">59.63</td>
750
+ <td align="center">81.93</td>
751
+ <td align="center">65.61</td>
752
+ <td align="center">74.76</td>
753
+ <td align="center">47.29</td>
754
+ <td align="center">68.97</td>
755
+ </tr>
756
+ <tr>
757
+ <td align="left"><strong>Step-Audio 2</strong></td>
758
+ <td align="center"><strong>83.32</strong></td>
759
+ <td align="center"><strong>91.05</strong></td>
760
+ <td align="center"><strong>75.45</strong></td>
761
+ <td align="center"><strong>86.08</strong></td>
762
+ <td align="center">68.25</td>
763
+ <td align="center">74.78</td>
764
+ <td align="center"><strong>63.18</strong></td>
765
+ <td align="center">65.10</td>
766
+ </tr>
767
+ <tr>
768
+ <td align="left"><strong>Step-Audio 2 mini</strong></td>
769
+ <td align="center">77.81</td>
770
+ <td align="center">89.19</td>
771
+ <td align="center">64.53</td>
772
+ <td align="center">84.12</td>
773
+ <td align="center"><strong>69.57</strong></td>
774
+ <td align="center"><strong>76.84</strong></td>
775
+ <td align="center">58.90</td>
776
+ <td align="center">69.42</td>
777
+ </tr>
778
+ <tr>
779
+ <td align="left"><strong>GPT-4o Audio</strong></td>
780
+ <td rowspan="6" style="text-align: center; vertical-align: middle;"><strong>English</strong></td>
781
+ <td align="center"><strong>84.54</strong></td>
782
+ <td align="center">90.18</td>
783
+ <td align="center">75.90</td>
784
+ <td align="center"><strong>90.41</strong></td>
785
+ <td align="center"><strong>67.51</strong></td>
786
+ <td align="center">60.65</td>
787
+ <td align="center">64.36</td>
788
+ <td align="center"><strong>78.46</strong></td>
789
+ </tr>
790
+ <tr>
791
+ <td align="left"><strong>Kimi-Audio</strong></td>
792
+ <td align="center">60.04</td>
793
+ <td align="center">83.36</td>
794
+ <td align="center">42.31</td>
795
+ <td align="center">60.36</td>
796
+ <td align="center">49.79</td>
797
+ <td align="center">50.32</td>
798
+ <td align="center">40.59</td>
799
+ <td align="center">56.04</td>
800
+ </tr>
801
+ <tr>
802
+ <td align="left"><strong>Qwen-Omni</strong></td>
803
+ <td align="center">70.58</td>
804
+ <td align="center">66.29</td>
805
+ <td align="center">69.62</td>
806
+ <td align="center">76.16</td>
807
+ <td align="center">50.99</td>
808
+ <td align="center">44.51</td>
809
+ <td align="center">63.88</td>
810
+ <td align="center">49.41</td>
811
+ </tr>
812
+ <tr>
813
+ <td align="left"><strong>Step-Audio-AQAA</strong></td>
814
+ <td align="center">71.11</td>
815
+ <td align="center">90.15</td>
816
+ <td align="center">56.12</td>
817
+ <td align="center">72.06</td>
818
+ <td align="center">52.01</td>
819
+ <td align="center">44.25</td>
820
+ <td align="center">54.54</td>
821
+ <td align="center">59.81</td>
822
+ </tr>
823
+ <tr>
824
+ <td align="left"><strong>Step-Audio 2</strong></td>
825
+ <td align="center">83.90</td>
826
+ <td align="center"><strong>92.72</strong></td>
827
+ <td align="center"><strong>76.51</strong></td>
828
+ <td align="center">84.92</td>
829
+ <td align="center">66.07</td>
830
+ <td align="center"><strong>64.86</strong></td>
831
+ <td align="center"><strong>67.75</strong></td>
832
+ <td align="center">66.33</td>
833
+ </tr>
834
+ <tr>
835
+ <td align="left"><strong>Step-Audio 2 mini</strong></td>
836
+ <td align="center">74.36</td>
837
+ <td align="center">90.07</td>
838
+ <td align="center">60.12</td>
839
+ <td align="center">77.65</td>
840
+ <td align="center">61.25</td>
841
+ <td align="center">58.79</td>
842
+ <td align="center">61.94</td>
843
+ <td align="center">63.80</td>
844
+ </tr>
845
+ </tbody>
846
+ </table>
847
+
848
+ <!-- ## Online Engine
849
+ The online version of Step-Audio can be accessed from app version of [跃问](https://yuewen.cn), where some impressive examples can be found as well.
850
+
851
+ <img src="./assets/yuewen.jpeg" width="200" alt="QR code"> -->
852
+
853
+ ## License
854
+
855
+ The model and code in this repository are licensed under the [Apache 2.0](LICENSE) License.
856
+
857
+ ## Citation
858
+
859
+ ```
860
+ @misc{wu2025stepaudio2technicalreport,
861
+ title={Step-Audio 2 Technical Report},
862
+ author={Boyong Wu and Chao Yan and Chen Hu and Cheng Yi and Chengli Feng and Fei Tian and Feiyu Shen and Gang Yu and Haoyang Zhang and Jingbei Li and Mingrui Chen and Peng Liu and Wang You and Xiangyu Tony Zhang and Xingyuan Li and Xuerui Yang and Yayue Deng and Yechang Huang and Yuxin Li and Yuxin Zhang and Zhao You and Brian Li and Changyi Wan and Hanpeng Hu and Jiangjie Zhen and Siyu Chen and Song Yuan and Xuelin Zhang and Yimin Jiang and Yu Zhou and Yuxiang Yang and Bingxin Li and Buyun Ma and Changhe Song and Dongqing Pang and Guoqiang Hu and Haiyang Sun and Kang An and Na Wang and Shuli Gao and Wei Ji and Wen Li and Wen Sun and Xuan Wen and Yong Ren and Yuankai Ma and Yufan Lu and Bin Wang and Bo Li and Changxin Miao and Che Liu and Chen Xu and Dapeng Shi and Dingyuan Hu and Donghang Wu and Enle Liu and Guanzhe Huang and Gulin Yan and Han Zhang and Hao Nie and Haonan Jia and Hongyu Zhou and Jianjian Sun and Jiaoren Wu and Jie Wu and Jie Yang and Jin Yang and Junzhe Lin and Kaixiang Li and Lei Yang and Liying Shi and Li Zhou and Longlong Gu and Ming Li and Mingliang Li and Mingxiao Li and Nan Wu and Qi Han and Qinyuan Tan and Shaoliang Pang and Shengjie Fan and Siqi Liu and Tiancheng Cao and Wanying Lu and Wenqing He and Wuxun Xie and Xu Zhao and Xueqi Li and Yanbo Yu and Yang Yang and Yi Liu and Yifan Lu and Yilei Wang and Yuanhao Ding and Yuanwei Liang and Yuanwei Lu and Yuchu Luo and Yuhe Yin and Yumeng Zhan and Yuxiang Zhang and Zidong Yang and Zixin Zhang and Binxing Jiao and Daxin Jiang and Heung-Yeung Shum and Jiansheng Chen and Jing Li and Xiangyu Zhang and Yibo Zhu},
863
+ year={2025},
864
+ eprint={2507.16632},
865
+ archivePrefix={arXiv},
866
+ primaryClass={cs.CL},
867
+ url={https://arxiv.org/abs/2507.16632},
868
+ }
869
+ ```
models/Step-Audio-2-mini-Think/added_tokens.json ADDED
The diff for this file is too large to render. See raw diff
 
models/Step-Audio-2-mini-Think/assets/architecture5.png ADDED

Git LFS Details

  • SHA256: 3c85a631baf8687bf35edaa22a76af8b641ece3cefc114befd6c9f27f72f73b0
  • Pointer size: 132 Bytes
  • Size of remote file: 1.55 MB
models/Step-Audio-2-mini-Think/assets/arxiv.svg ADDED
models/Step-Audio-2-mini-Think/assets/logo.png ADDED
models/Step-Audio-2-mini-Think/assets/qrcode.jpg ADDED
models/Step-Audio-2-mini-Think/assets/radar.png ADDED

Git LFS Details

  • SHA256: f7c16383ccd791312635793fb214aa53cce9a0b2135ab9e56672d873fee231ab
  • Pointer size: 132 Bytes
  • Size of remote file: 1.73 MB
models/Step-Audio-2-mini-Think/assets/wechat_group.png ADDED
models/Step-Audio-2-mini-Think/config.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "architectures": [
3
+ "StepAudio2ForCausalLM"
4
+ ],
5
+ "auto_map": {
6
+ "AutoConfig": "configuration_step_audio_2.StepAudio2Config",
7
+ "AutoModelForCausalLM": "modeling_step_audio_2.StepAudio2ForCausalLM"
8
+ },
9
+ "model_type": "step_audio_2",
10
+ "text_config": {
11
+ "hidden_size": 3584,
12
+ "intermediate_size": 18944,
13
+ "num_attention_heads": 28,
14
+ "num_attention_groups": 4,
15
+ "num_key_value_heads": 4,
16
+ "num_hidden_layers": 28,
17
+ "max_seq_len": 16384,
18
+ "vocab_size": 158720,
19
+ "rms_norm_eps": 1e-06,
20
+ "eos_token_id": 151643,
21
+ "pad_token_id": 151643,
22
+ "rope_theta": 1000000.0,
23
+ "max_position_embeddings": 16384,
24
+ "rope_scaling": null,
25
+ "torch_dtype": "bfloat16"
26
+ },
27
+ "audio_encoder_config": {
28
+ "n_mels": 128,
29
+ "n_audio_ctx": 1500,
30
+ "n_audio_state": 1280,
31
+ "n_audio_head": 20,
32
+ "n_audio_layer": 32,
33
+ "n_codebook_size": 4096,
34
+ "llm_dim": 3584,
35
+ "kernel_size": 3,
36
+ "adapter_stride": 2
37
+ }
38
+ }
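
The `auto_map` block above is what lets `transformers` resolve this custom architecture from the Python files shipped alongside the checkpoint. A small sketch of inspecting the configuration (the local path is an assumption):

```python
# Sketch: load the custom config via auto_map (requires trust_remote_code).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("models/Step-Audio-2-mini-Think", trust_remote_code=True)
print(cfg.model_type)                     # step_audio_2
print(cfg.text_config.hidden_size)        # 3584 (Qwen2-style LLM backbone)
print(cfg.audio_encoder_config.n_mels)    # 128  (mel bins for the audio encoder)
```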
models/Step-Audio-2-mini-Think/configuration_step_audio_2.py ADDED
@@ -0,0 +1,128 @@
1
+ from typing import Optional, Union
2
+
3
+ from transformers import Qwen2Config
4
+ from transformers.configuration_utils import PretrainedConfig
5
+
6
+
7
+ class StepAudio2EncoderConfig(PretrainedConfig):
8
+ model_type = "step_audio_2_encoder"
9
+
10
+ def __init__(
11
+ self,
12
+ n_mels=128,
13
+ n_audio_ctx=1500,
14
+ n_audio_state=512,
15
+ n_audio_head=8,
16
+ n_audio_layer=6,
17
+ llm_dim=4096,
18
+ kernel_size=3,
19
+ adapter_stride=2,
20
+ **kwargs,
21
+ ):
22
+ self.n_mels = n_mels
23
+ self.n_audio_ctx = n_audio_ctx
24
+ self.n_audio_state = n_audio_state
25
+ self.n_audio_head = n_audio_head
26
+ self.n_audio_layer = n_audio_layer
27
+ self.llm_dim = llm_dim
28
+ self.kernel_size = kernel_size
29
+ self.adapter_stride = adapter_stride
30
+ super().__init__(**kwargs)
31
+
32
+ class StepAudio2TextConfig(PretrainedConfig):
33
+ model_type = "step_audio_2_text"
34
+
35
+ def __init__(
36
+ self,
37
+ vocab_size=64012,
38
+ hidden_size=4096,
39
+ intermediate_size=11008,
40
+ num_hidden_layers=48,
41
+ num_attention_heads=32,
42
+ num_attention_groups=4,
43
+ num_key_value_heads=4,
44
+ hidden_act="silu",
45
+ max_position_embeddings=8192,
46
+ initializer_range=0.02,
47
+ rms_norm_eps=1e-6,
48
+ rope_theta=1000000.0,
49
+ rope_scaling=None,
50
+ eos_token_id=None,
51
+ **kwargs
52
+ ):
53
+
54
+ if eos_token_id is not None:
55
+ if isinstance(eos_token_id, list):
56
+ eos_token_id = list(set([151643, 151645, 151665] + eos_token_id))
57
+ else:
58
+ eos_token_id = [151643, 151645, 151665, eos_token_id]
59
+ else:
60
+ eos_token_id = [151643, 151645, 151665]
61
+
62
+ super().__init__(
63
+ eos_token_id=eos_token_id,
64
+ **kwargs)
65
+
66
+ self.vocab_size = vocab_size
67
+ self.hidden_size = hidden_size
68
+ self.intermediate_size = intermediate_size
69
+ self.num_hidden_layers = num_hidden_layers
70
+ self.num_attention_heads = num_attention_heads
71
+ self.num_attention_groups = num_attention_groups
72
+ self.num_key_value_heads = num_key_value_heads
73
+ assert self.num_attention_groups == self.num_key_value_heads, "num_attention_groups must be equal to num_key_value_heads"
74
+ self.hidden_act = hidden_act
75
+ self.max_position_embeddings = max_position_embeddings
76
+ self.initializer_range = initializer_range
77
+ self.rms_norm_eps = rms_norm_eps
78
+ self.rope_theta = rope_theta
79
+ self.rope_scaling = rope_scaling
80
+
81
+ self.text_config = Qwen2Config(
82
+ vocab_size=vocab_size,
83
+ hidden_size=hidden_size,
84
+ intermediate_size=intermediate_size,
85
+ num_hidden_layers=num_hidden_layers,
86
+ num_attention_heads=num_attention_heads,
87
+ num_key_value_heads=num_key_value_heads,
88
+ hidden_act=hidden_act,
89
+ max_position_embeddings=max_position_embeddings,
90
+ initializer_range=initializer_range,
91
+ rms_norm_eps=rms_norm_eps,
92
+ rope_theta=rope_theta,
93
+ rope_scaling=rope_scaling,
94
+ architectures=["Qwen2ForCausalLM"],
95
+ torch_dtype=getattr(self, "torch_dtype", "bfloat16"),
96
+ )
97
+
98
+ class StepAudio2Config(PretrainedConfig):
99
+ model_type = "step_audio_2"
100
+ architectures = ["StepAudio2ForCausalLM"]
101
+
102
+ def __init__(
103
+ self,
104
+ audio_encoder_config :Optional[Union[dict, StepAudio2EncoderConfig]] = None,
105
+ text_config: Optional[Union[dict, StepAudio2TextConfig]] = None,
106
+ use_sliding_window: bool = False,
107
+ sliding_window: Optional[int] = 2048,
108
+ max_window_layers: Optional[int] = None,
109
+ **kwargs
110
+ ):
111
+ kwargs.setdefault("use_sliding_window", use_sliding_window)
112
+ kwargs.setdefault("sliding_window", sliding_window)
113
+ if max_window_layers is None:
114
+ max_window_layers = kwargs.get("num_hidden_layers", None)
115
+ kwargs.setdefault("max_window_layers", max_window_layers)
116
+ super().__init__(**kwargs)
117
+
118
+ if text_config is None:
119
+ text_config = StepAudio2TextConfig().text_config
120
+ elif isinstance(text_config, dict):
121
+ text_config = StepAudio2TextConfig(**text_config).text_config
122
+
123
+ self.text_config = text_config
124
+
125
+ if audio_encoder_config is None:
126
+ self.audio_encoder_config = StepAudio2EncoderConfig()
127
+ elif isinstance(audio_encoder_config, dict):
128
+ self.audio_encoder_config = StepAudio2EncoderConfig(**audio_encoder_config)
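
As a quick sanity check of how these classes compose, the sketch below builds a `StepAudio2Config` from dictionaries mirroring the `config.json` above; it assumes the model directory is on the Python path and is purely illustrative.

```python
# Illustrative only: construct StepAudio2Config with values mirroring config.json.
from configuration_step_audio_2 import StepAudio2Config

cfg = StepAudio2Config(
    text_config={
        "hidden_size": 3584,
        "intermediate_size": 18944,
        "num_attention_heads": 28,
        "num_attention_groups": 4,
        "num_key_value_heads": 4,      # must equal num_attention_groups (asserted above)
        "num_hidden_layers": 28,
        "vocab_size": 158720,
        "max_position_embeddings": 16384,
        "rope_theta": 1000000.0,
    },
    audio_encoder_config={
        "n_mels": 128,
        "n_audio_state": 1280,
        "n_audio_head": 20,
        "n_audio_layer": 32,
        "llm_dim": 3584,
    },
)
print(type(cfg.text_config).__name__)           # Qwen2Config (the LLM backbone config)
print(cfg.audio_encoder_config.adapter_stride)  # 2 (default from StepAudio2EncoderConfig)
```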
models/Step-Audio-2-mini-Think/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/Step-Audio-2-mini-Think/model-00001.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54f255947ecd9537fc0767627384583acf711f4ab324050706fe033fb5092446
3
+ size 9925030808
models/Step-Audio-2-mini-Think/model-00002.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:258e9aebf0942f27a430a72d13adb6461126c654387e84d3a9656d0a9383b555
3
+ size 6705418376
models/Step-Audio-2-mini-Think/model.safetensors.index.json ADDED
@@ -0,0 +1 @@
1
+ {"metadata": {"total_size": 16630358528}, "weight_map": {"encoder.conv1.weight": "model-00001.safetensors", "encoder.conv1.bias": "model-00001.safetensors", "encoder.conv2.weight": "model-00001.safetensors", "encoder.conv2.bias": "model-00001.safetensors", "encoder.positional_embedding.weight": "model-00001.safetensors", "encoder.blocks.0.attn.query.weight": "model-00001.safetensors", "encoder.blocks.0.attn.query.bias": "model-00001.safetensors", "encoder.blocks.0.attn.key.weight": "model-00001.safetensors", "encoder.blocks.0.attn.value.weight": "model-00001.safetensors", "encoder.blocks.0.attn.value.bias": "model-00001.safetensors", "encoder.blocks.0.attn.out.weight": "model-00001.safetensors", "encoder.blocks.0.attn.out.bias": "model-00001.safetensors", "encoder.blocks.0.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.0.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.0.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.0.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.0.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.0.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.0.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.0.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.1.attn.query.weight": "model-00001.safetensors", "encoder.blocks.1.attn.query.bias": "model-00001.safetensors", "encoder.blocks.1.attn.key.weight": "model-00001.safetensors", "encoder.blocks.1.attn.value.weight": "model-00001.safetensors", "encoder.blocks.1.attn.value.bias": "model-00001.safetensors", "encoder.blocks.1.attn.out.weight": "model-00001.safetensors", "encoder.blocks.1.attn.out.bias": "model-00001.safetensors", "encoder.blocks.1.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.1.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.1.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.1.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.1.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.1.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.1.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.1.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.2.attn.query.weight": "model-00001.safetensors", "encoder.blocks.2.attn.query.bias": "model-00001.safetensors", "encoder.blocks.2.attn.key.weight": "model-00001.safetensors", "encoder.blocks.2.attn.value.weight": "model-00001.safetensors", "encoder.blocks.2.attn.value.bias": "model-00001.safetensors", "encoder.blocks.2.attn.out.weight": "model-00001.safetensors", "encoder.blocks.2.attn.out.bias": "model-00001.safetensors", "encoder.blocks.2.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.2.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.2.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.2.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.2.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.2.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.2.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.2.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.3.attn.query.weight": "model-00001.safetensors", "encoder.blocks.3.attn.query.bias": "model-00001.safetensors", "encoder.blocks.3.attn.key.weight": "model-00001.safetensors", "encoder.blocks.3.attn.value.weight": "model-00001.safetensors", "encoder.blocks.3.attn.value.bias": "model-00001.safetensors", "encoder.blocks.3.attn.out.weight": "model-00001.safetensors", "encoder.blocks.3.attn.out.bias": "model-00001.safetensors", 
"encoder.blocks.3.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.3.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.3.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.3.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.3.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.3.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.3.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.3.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.4.attn.query.weight": "model-00001.safetensors", "encoder.blocks.4.attn.query.bias": "model-00001.safetensors", "encoder.blocks.4.attn.key.weight": "model-00001.safetensors", "encoder.blocks.4.attn.value.weight": "model-00001.safetensors", "encoder.blocks.4.attn.value.bias": "model-00001.safetensors", "encoder.blocks.4.attn.out.weight": "model-00001.safetensors", "encoder.blocks.4.attn.out.bias": "model-00001.safetensors", "encoder.blocks.4.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.4.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.4.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.4.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.4.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.4.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.4.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.4.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.5.attn.query.weight": "model-00001.safetensors", "encoder.blocks.5.attn.query.bias": "model-00001.safetensors", "encoder.blocks.5.attn.key.weight": "model-00001.safetensors", "encoder.blocks.5.attn.value.weight": "model-00001.safetensors", "encoder.blocks.5.attn.value.bias": "model-00001.safetensors", "encoder.blocks.5.attn.out.weight": "model-00001.safetensors", "encoder.blocks.5.attn.out.bias": "model-00001.safetensors", "encoder.blocks.5.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.5.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.5.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.5.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.5.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.5.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.5.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.5.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.6.attn.query.weight": "model-00001.safetensors", "encoder.blocks.6.attn.query.bias": "model-00001.safetensors", "encoder.blocks.6.attn.key.weight": "model-00001.safetensors", "encoder.blocks.6.attn.value.weight": "model-00001.safetensors", "encoder.blocks.6.attn.value.bias": "model-00001.safetensors", "encoder.blocks.6.attn.out.weight": "model-00001.safetensors", "encoder.blocks.6.attn.out.bias": "model-00001.safetensors", "encoder.blocks.6.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.6.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.6.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.6.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.6.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.6.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.6.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.6.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.7.attn.query.weight": "model-00001.safetensors", "encoder.blocks.7.attn.query.bias": "model-00001.safetensors", "encoder.blocks.7.attn.key.weight": "model-00001.safetensors", "encoder.blocks.7.attn.value.weight": "model-00001.safetensors", "encoder.blocks.7.attn.value.bias": 
"model-00001.safetensors", "encoder.blocks.7.attn.out.weight": "model-00001.safetensors", "encoder.blocks.7.attn.out.bias": "model-00001.safetensors", "encoder.blocks.7.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.7.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.7.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.7.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.7.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.7.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.7.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.7.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.8.attn.query.weight": "model-00001.safetensors", "encoder.blocks.8.attn.query.bias": "model-00001.safetensors", "encoder.blocks.8.attn.key.weight": "model-00001.safetensors", "encoder.blocks.8.attn.value.weight": "model-00001.safetensors", "encoder.blocks.8.attn.value.bias": "model-00001.safetensors", "encoder.blocks.8.attn.out.weight": "model-00001.safetensors", "encoder.blocks.8.attn.out.bias": "model-00001.safetensors", "encoder.blocks.8.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.8.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.8.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.8.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.8.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.8.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.8.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.8.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.9.attn.query.weight": "model-00001.safetensors", "encoder.blocks.9.attn.query.bias": "model-00001.safetensors", "encoder.blocks.9.attn.key.weight": "model-00001.safetensors", "encoder.blocks.9.attn.value.weight": "model-00001.safetensors", "encoder.blocks.9.attn.value.bias": "model-00001.safetensors", "encoder.blocks.9.attn.out.weight": "model-00001.safetensors", "encoder.blocks.9.attn.out.bias": "model-00001.safetensors", "encoder.blocks.9.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.9.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.9.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.9.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.9.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.9.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.9.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.9.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.10.attn.query.weight": "model-00001.safetensors", "encoder.blocks.10.attn.query.bias": "model-00001.safetensors", "encoder.blocks.10.attn.key.weight": "model-00001.safetensors", "encoder.blocks.10.attn.value.weight": "model-00001.safetensors", "encoder.blocks.10.attn.value.bias": "model-00001.safetensors", "encoder.blocks.10.attn.out.weight": "model-00001.safetensors", "encoder.blocks.10.attn.out.bias": "model-00001.safetensors", "encoder.blocks.10.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.10.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.10.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.10.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.10.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.10.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.10.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.10.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.11.attn.query.weight": "model-00001.safetensors", "encoder.blocks.11.attn.query.bias": "model-00001.safetensors", 
"encoder.blocks.11.attn.key.weight": "model-00001.safetensors", "encoder.blocks.11.attn.value.weight": "model-00001.safetensors", "encoder.blocks.11.attn.value.bias": "model-00001.safetensors", "encoder.blocks.11.attn.out.weight": "model-00001.safetensors", "encoder.blocks.11.attn.out.bias": "model-00001.safetensors", "encoder.blocks.11.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.11.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.11.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.11.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.11.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.11.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.11.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.11.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.12.attn.query.weight": "model-00001.safetensors", "encoder.blocks.12.attn.query.bias": "model-00001.safetensors", "encoder.blocks.12.attn.key.weight": "model-00001.safetensors", "encoder.blocks.12.attn.value.weight": "model-00001.safetensors", "encoder.blocks.12.attn.value.bias": "model-00001.safetensors", "encoder.blocks.12.attn.out.weight": "model-00001.safetensors", "encoder.blocks.12.attn.out.bias": "model-00001.safetensors", "encoder.blocks.12.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.12.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.12.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.12.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.12.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.12.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.12.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.12.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.13.attn.query.weight": "model-00001.safetensors", "encoder.blocks.13.attn.query.bias": "model-00001.safetensors", "encoder.blocks.13.attn.key.weight": "model-00001.safetensors", "encoder.blocks.13.attn.value.weight": "model-00001.safetensors", "encoder.blocks.13.attn.value.bias": "model-00001.safetensors", "encoder.blocks.13.attn.out.weight": "model-00001.safetensors", "encoder.blocks.13.attn.out.bias": "model-00001.safetensors", "encoder.blocks.13.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.13.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.13.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.13.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.13.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.13.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.13.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.13.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.14.attn.query.weight": "model-00001.safetensors", "encoder.blocks.14.attn.query.bias": "model-00001.safetensors", "encoder.blocks.14.attn.key.weight": "model-00001.safetensors", "encoder.blocks.14.attn.value.weight": "model-00001.safetensors", "encoder.blocks.14.attn.value.bias": "model-00001.safetensors", "encoder.blocks.14.attn.out.weight": "model-00001.safetensors", "encoder.blocks.14.attn.out.bias": "model-00001.safetensors", "encoder.blocks.14.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.14.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.14.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.14.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.14.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.14.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.14.mlp_ln.weight": "model-00001.safetensors", 
"encoder.blocks.14.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.15.attn.query.weight": "model-00001.safetensors", "encoder.blocks.15.attn.query.bias": "model-00001.safetensors", "encoder.blocks.15.attn.key.weight": "model-00001.safetensors", "encoder.blocks.15.attn.value.weight": "model-00001.safetensors", "encoder.blocks.15.attn.value.bias": "model-00001.safetensors", "encoder.blocks.15.attn.out.weight": "model-00001.safetensors", "encoder.blocks.15.attn.out.bias": "model-00001.safetensors", "encoder.blocks.15.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.15.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.15.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.15.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.15.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.15.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.15.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.15.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.16.attn.query.weight": "model-00001.safetensors", "encoder.blocks.16.attn.query.bias": "model-00001.safetensors", "encoder.blocks.16.attn.key.weight": "model-00001.safetensors", "encoder.blocks.16.attn.value.weight": "model-00001.safetensors", "encoder.blocks.16.attn.value.bias": "model-00001.safetensors", "encoder.blocks.16.attn.out.weight": "model-00001.safetensors", "encoder.blocks.16.attn.out.bias": "model-00001.safetensors", "encoder.blocks.16.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.16.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.16.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.16.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.16.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.16.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.16.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.16.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.17.attn.query.weight": "model-00001.safetensors", "encoder.blocks.17.attn.query.bias": "model-00001.safetensors", "encoder.blocks.17.attn.key.weight": "model-00001.safetensors", "encoder.blocks.17.attn.value.weight": "model-00001.safetensors", "encoder.blocks.17.attn.value.bias": "model-00001.safetensors", "encoder.blocks.17.attn.out.weight": "model-00001.safetensors", "encoder.blocks.17.attn.out.bias": "model-00001.safetensors", "encoder.blocks.17.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.17.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.17.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.17.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.17.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.17.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.17.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.17.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.18.attn.query.weight": "model-00001.safetensors", "encoder.blocks.18.attn.query.bias": "model-00001.safetensors", "encoder.blocks.18.attn.key.weight": "model-00001.safetensors", "encoder.blocks.18.attn.value.weight": "model-00001.safetensors", "encoder.blocks.18.attn.value.bias": "model-00001.safetensors", "encoder.blocks.18.attn.out.weight": "model-00001.safetensors", "encoder.blocks.18.attn.out.bias": "model-00001.safetensors", "encoder.blocks.18.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.18.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.18.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.18.mlp.0.bias": 
"model-00001.safetensors", "encoder.blocks.18.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.18.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.18.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.18.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.19.attn.query.weight": "model-00001.safetensors", "encoder.blocks.19.attn.query.bias": "model-00001.safetensors", "encoder.blocks.19.attn.key.weight": "model-00001.safetensors", "encoder.blocks.19.attn.value.weight": "model-00001.safetensors", "encoder.blocks.19.attn.value.bias": "model-00001.safetensors", "encoder.blocks.19.attn.out.weight": "model-00001.safetensors", "encoder.blocks.19.attn.out.bias": "model-00001.safetensors", "encoder.blocks.19.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.19.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.19.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.19.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.19.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.19.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.19.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.19.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.20.attn.query.weight": "model-00001.safetensors", "encoder.blocks.20.attn.query.bias": "model-00001.safetensors", "encoder.blocks.20.attn.key.weight": "model-00001.safetensors", "encoder.blocks.20.attn.value.weight": "model-00001.safetensors", "encoder.blocks.20.attn.value.bias": "model-00001.safetensors", "encoder.blocks.20.attn.out.weight": "model-00001.safetensors", "encoder.blocks.20.attn.out.bias": "model-00001.safetensors", "encoder.blocks.20.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.20.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.20.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.20.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.20.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.20.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.20.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.20.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.21.attn.query.weight": "model-00001.safetensors", "encoder.blocks.21.attn.query.bias": "model-00001.safetensors", "encoder.blocks.21.attn.key.weight": "model-00001.safetensors", "encoder.blocks.21.attn.value.weight": "model-00001.safetensors", "encoder.blocks.21.attn.value.bias": "model-00001.safetensors", "encoder.blocks.21.attn.out.weight": "model-00001.safetensors", "encoder.blocks.21.attn.out.bias": "model-00001.safetensors", "encoder.blocks.21.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.21.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.21.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.21.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.21.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.21.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.21.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.21.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.22.attn.query.weight": "model-00001.safetensors", "encoder.blocks.22.attn.query.bias": "model-00001.safetensors", "encoder.blocks.22.attn.key.weight": "model-00001.safetensors", "encoder.blocks.22.attn.value.weight": "model-00001.safetensors", "encoder.blocks.22.attn.value.bias": "model-00001.safetensors", "encoder.blocks.22.attn.out.weight": "model-00001.safetensors", "encoder.blocks.22.attn.out.bias": "model-00001.safetensors", 
"encoder.blocks.22.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.22.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.22.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.22.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.22.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.22.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.22.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.22.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.23.attn.query.weight": "model-00001.safetensors", "encoder.blocks.23.attn.query.bias": "model-00001.safetensors", "encoder.blocks.23.attn.key.weight": "model-00001.safetensors", "encoder.blocks.23.attn.value.weight": "model-00001.safetensors", "encoder.blocks.23.attn.value.bias": "model-00001.safetensors", "encoder.blocks.23.attn.out.weight": "model-00001.safetensors", "encoder.blocks.23.attn.out.bias": "model-00001.safetensors", "encoder.blocks.23.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.23.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.23.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.23.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.23.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.23.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.23.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.23.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.24.attn.query.weight": "model-00001.safetensors", "encoder.blocks.24.attn.query.bias": "model-00001.safetensors", "encoder.blocks.24.attn.key.weight": "model-00001.safetensors", "encoder.blocks.24.attn.value.weight": "model-00001.safetensors", "encoder.blocks.24.attn.value.bias": "model-00001.safetensors", "encoder.blocks.24.attn.out.weight": "model-00001.safetensors", "encoder.blocks.24.attn.out.bias": "model-00001.safetensors", "encoder.blocks.24.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.24.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.24.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.24.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.24.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.24.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.24.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.24.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.25.attn.query.weight": "model-00001.safetensors", "encoder.blocks.25.attn.query.bias": "model-00001.safetensors", "encoder.blocks.25.attn.key.weight": "model-00001.safetensors", "encoder.blocks.25.attn.value.weight": "model-00001.safetensors", "encoder.blocks.25.attn.value.bias": "model-00001.safetensors", "encoder.blocks.25.attn.out.weight": "model-00001.safetensors", "encoder.blocks.25.attn.out.bias": "model-00001.safetensors", "encoder.blocks.25.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.25.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.25.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.25.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.25.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.25.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.25.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.25.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.26.attn.query.weight": "model-00001.safetensors", "encoder.blocks.26.attn.query.bias": "model-00001.safetensors", "encoder.blocks.26.attn.key.weight": "model-00001.safetensors", "encoder.blocks.26.attn.value.weight": "model-00001.safetensors", 
"encoder.blocks.26.attn.value.bias": "model-00001.safetensors", "encoder.blocks.26.attn.out.weight": "model-00001.safetensors", "encoder.blocks.26.attn.out.bias": "model-00001.safetensors", "encoder.blocks.26.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.26.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.26.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.26.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.26.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.26.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.26.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.26.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.27.attn.query.weight": "model-00001.safetensors", "encoder.blocks.27.attn.query.bias": "model-00001.safetensors", "encoder.blocks.27.attn.key.weight": "model-00001.safetensors", "encoder.blocks.27.attn.value.weight": "model-00001.safetensors", "encoder.blocks.27.attn.value.bias": "model-00001.safetensors", "encoder.blocks.27.attn.out.weight": "model-00001.safetensors", "encoder.blocks.27.attn.out.bias": "model-00001.safetensors", "encoder.blocks.27.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.27.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.27.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.27.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.27.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.27.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.27.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.27.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.28.attn.query.weight": "model-00001.safetensors", "encoder.blocks.28.attn.query.bias": "model-00001.safetensors", "encoder.blocks.28.attn.key.weight": "model-00001.safetensors", "encoder.blocks.28.attn.value.weight": "model-00001.safetensors", "encoder.blocks.28.attn.value.bias": "model-00001.safetensors", "encoder.blocks.28.attn.out.weight": "model-00001.safetensors", "encoder.blocks.28.attn.out.bias": "model-00001.safetensors", "encoder.blocks.28.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.28.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.28.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.28.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.28.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.28.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.28.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.28.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.29.attn.query.weight": "model-00001.safetensors", "encoder.blocks.29.attn.query.bias": "model-00001.safetensors", "encoder.blocks.29.attn.key.weight": "model-00001.safetensors", "encoder.blocks.29.attn.value.weight": "model-00001.safetensors", "encoder.blocks.29.attn.value.bias": "model-00001.safetensors", "encoder.blocks.29.attn.out.weight": "model-00001.safetensors", "encoder.blocks.29.attn.out.bias": "model-00001.safetensors", "encoder.blocks.29.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.29.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.29.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.29.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.29.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.29.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.29.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.29.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.30.attn.query.weight": "model-00001.safetensors", 
"encoder.blocks.30.attn.query.bias": "model-00001.safetensors", "encoder.blocks.30.attn.key.weight": "model-00001.safetensors", "encoder.blocks.30.attn.value.weight": "model-00001.safetensors", "encoder.blocks.30.attn.value.bias": "model-00001.safetensors", "encoder.blocks.30.attn.out.weight": "model-00001.safetensors", "encoder.blocks.30.attn.out.bias": "model-00001.safetensors", "encoder.blocks.30.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.30.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.30.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.30.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.30.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.30.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.30.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.30.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.31.attn.query.weight": "model-00001.safetensors", "encoder.blocks.31.attn.query.bias": "model-00001.safetensors", "encoder.blocks.31.attn.key.weight": "model-00001.safetensors", "encoder.blocks.31.attn.value.weight": "model-00001.safetensors", "encoder.blocks.31.attn.value.bias": "model-00001.safetensors", "encoder.blocks.31.attn.out.weight": "model-00001.safetensors", "encoder.blocks.31.attn.out.bias": "model-00001.safetensors", "encoder.blocks.31.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.31.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.31.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.31.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.31.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.31.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.31.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.31.mlp_ln.bias": "model-00001.safetensors", "encoder.after_norm.weight": "model-00001.safetensors", "encoder.after_norm.bias": "model-00001.safetensors", "adapter.conv.weight": "model-00001.safetensors", "adapter.conv.bias": "model-00001.safetensors", "adapter.linear1.weight": "model-00001.safetensors", "adapter.linear1.bias": "model-00001.safetensors", "adapter.linear2.weight": "model-00001.safetensors", "adapter.linear2.bias": "model-00001.safetensors", "model.embed_tokens.weight": "model-00001.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.0.input_layernorm.weight": "model-00001.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.0.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.0.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.0.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.1.input_layernorm.weight": "model-00001.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001.safetensors", 
"model.layers.1.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.1.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.1.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.1.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.2.input_layernorm.weight": "model-00001.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.2.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.2.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.2.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.3.input_layernorm.weight": "model-00001.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.3.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.3.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.3.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.4.input_layernorm.weight": "model-00001.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.4.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.4.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.4.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.5.input_layernorm.weight": "model-00001.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.5.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.5.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.5.self_attn.v_proj.bias": "model-00001.safetensors", 
"model.layers.5.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.6.input_layernorm.weight": "model-00001.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.6.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.6.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.6.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.7.input_layernorm.weight": "model-00001.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.7.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.7.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.7.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.8.input_layernorm.weight": "model-00001.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.8.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.8.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.8.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.9.input_layernorm.weight": "model-00001.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.9.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.9.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.9.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.10.input_layernorm.weight": "model-00001.safetensors", 
"model.layers.10.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.10.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.10.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.10.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.11.input_layernorm.weight": "model-00001.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.11.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.11.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.11.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.12.input_layernorm.weight": "model-00001.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.12.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.12.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.12.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.13.input_layernorm.weight": "model-00001.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.13.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.13.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.13.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.14.input_layernorm.weight": "model-00001.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001.safetensors", 
"model.layers.14.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.14.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.14.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.14.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.15.input_layernorm.weight": "model-00001.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.15.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.15.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.15.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.16.input_layernorm.weight": "model-00001.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.16.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.16.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.16.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.17.input_layernorm.weight": "model-00002.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.17.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.17.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.17.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.18.input_layernorm.weight": "model-00002.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.18.self_attn.q_proj.bias": "model-00002.safetensors", 
"model.layers.18.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.18.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.19.input_layernorm.weight": "model-00002.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.19.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.19.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.19.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.20.input_layernorm.weight": "model-00002.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.20.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.20.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.20.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.21.input_layernorm.weight": "model-00002.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.21.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.21.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.21.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.22.input_layernorm.weight": "model-00002.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.22.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.22.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.22.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00002.safetensors", 
"model.layers.22.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.23.input_layernorm.weight": "model-00002.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.23.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.23.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.23.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.24.input_layernorm.weight": "model-00002.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.24.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.24.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.24.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.25.input_layernorm.weight": "model-00002.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.25.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.25.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.25.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.26.input_layernorm.weight": "model-00002.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.26.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.26.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.26.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.27.input_layernorm.weight": "model-00002.safetensors", 
"model.layers.27.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.27.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.27.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.27.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00002.safetensors", "model.norm.weight": "model-00002.safetensors", "lm_head.weight": "model-00002.safetensors"}}
models/Step-Audio-2-mini-Think/modeling_step_audio_2.py ADDED
@@ -0,0 +1,425 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Iterable, Optional, Tuple
2
+
3
+ import librosa
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import torchaudio
7
+ from torch import Tensor, nn
8
+ from transformers import PreTrainedModel, Qwen2Model
9
+ from transformers.generation.utils import GenerationMixin
10
+ from transformers.modeling_outputs import CausalLMOutputWithPast
11
+
12
+ from .configuration_step_audio_2 import StepAudio2Config
13
+
14
+
15
+ def _mel_filters(n_mels: int) -> torch.Tensor:
16
+ """Load the mel filterbank matrix for projecting STFT into a Mel spectrogram."""
17
+ assert n_mels in {80, 128}, f"Unsupported n_mels: {n_mels}"
18
+ if n_mels == 128:
19
+ return torch.from_numpy(librosa.filters.mel(sr=16000, n_fft=400, n_mels=128))
20
+ else:
21
+ return torch.from_numpy(librosa.filters.mel(sr=16000, n_fft=400, n_mels=80))
22
+
23
+
24
+ def load_audio(file_path, target_rate=16000, max_length=None):
25
+ """
26
+ Open an audio file and read as mono waveform, resampling as necessary
27
+ If max_length is provided, truncate the audio to that length
28
+ """
29
+ waveform, sample_rate = torchaudio.load(file_path)
30
+ if sample_rate != target_rate:
31
+ waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_rate)(waveform)
32
+ audio = waveform[0] # get the first channel
33
+
34
+ # Truncate audio if it exceeds max_length
35
+ if max_length is not None and audio.shape[0] > max_length:
36
+ audio = audio[:max_length]
37
+
38
+ return audio
39
+
40
+ def log_mel_spectrogram(audio, n_mels=128, padding=479, device=None):
41
+ """
42
+ Compute the log-Mel spectrogram with specific padding for StepAudio
43
+ """
44
+ if not torch.is_tensor(audio):
45
+ if isinstance(audio, str):
46
+ audio = load_audio(audio)
47
+ audio = torch.from_numpy(audio)
48
+ if device is not None:
49
+ audio = audio.to(device)
50
+ if padding > 0:
51
+ audio = F.pad(audio, (0, padding))
52
+ window = torch.hann_window(400).to(audio.device)
53
+ stft = torch.stft(audio, 400, 160, window=window, return_complex=True)
54
+ magnitudes = stft[..., :-1].abs() ** 2
55
+ filters = _mel_filters(n_mels)
56
+ mel_spec = filters @ magnitudes
57
+
58
+ log_spec = torch.clamp(mel_spec, min=1e-10).log10()
59
+ log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
60
+ log_spec = (log_spec + 4.0) / 4.0
61
+ return log_spec
62
+
63
+ def compute_token_num(max_feature_len):
64
+ # First, audio goes through encoder:
65
+ # 1. conv1: kernel=3, stride=1, padding=1 -> size unchanged
66
+ # 2. conv2: kernel=3, stride=2, padding=1 -> size/2
67
+ # 3. avg_pooler: kernel=2, stride=2 -> size/2
68
+ max_feature_len = max_feature_len - 2 # remove padding
69
+ encoder_output_dim = (max_feature_len + 1) // 2 // 2 # after conv2 and avg_pooler
70
+
71
+ # Then through adaptor (parameters from config file):
72
+ padding = 1
73
+ kernel_size = 3 # from config: audio_encoder_config.kernel_size
74
+ stride = 2 # from config: audio_encoder_config.adapter_stride
75
+ adapter_output_dim = (encoder_output_dim + 2 * padding - kernel_size) // stride + 1
76
+ return adapter_output_dim
77
+
78
+ def make_non_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
79
+ """Make mask tensor containing indices of non-padded part.
80
+
81
+ The sequences in a batch may have different lengths. To enable
82
+ batch computing, padding is need to make all sequence in same
83
+ size. To avoid the padding part pass value to context dependent
84
+ block such as attention or convolution , this padding part is
85
+ masked.
86
+
87
+ 1 for non-padded part and 0 for padded part.
88
+
89
+ Parameters
90
+ ----------
91
+ lengths (torch.Tensor): Batch of lengths (B,).
92
+
93
+ Returns:
94
+ -------
95
+ torch.Tensor: Mask tensor containing indices of padded part (B, max_T).
96
+
97
+ Examples:
98
+ >>> import torch
99
+ >>> import s3tokenizer
100
+ >>> lengths = torch.tensor([5, 3, 2])
101
+ >>> masks = s3tokenizer.make_non_pad_mask(lengths)
102
+ masks = [[1, 1, 1, 1, 1],
103
+ [1, 1, 1, 0, 0],
104
+ [1, 1, 0, 0, 0]]
105
+ """
106
+ batch_size = lengths.size(0)
107
+ max_len = max_len if max_len > 0 else lengths.max().item()
108
+ seq_range = torch.arange(0,
109
+ max_len,
110
+ dtype=torch.int64,
111
+ device=lengths.device)
112
+ seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
113
+ seq_length_expand = lengths.unsqueeze(-1)
114
+ mask = seq_range_expand >= seq_length_expand
115
+ return ~mask
116
+
117
+ def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
118
+ """Convert bool-tensor to float-tensor for flash attention.
119
+
120
+ Parameters
121
+ ----------
122
+ lengths (torch.Tensor): Batch of lengths (B, ?).
123
+
124
+ Returns:
125
+ -------
126
+ torch.Tensor: Mask tensor containing indices of padded part (B, ?).
127
+
128
+ Examples:
129
+ >>> import torch
130
+ >>> import s3tokenizer
131
+ >>> lengths = torch.tensor([5, 3, 2])
132
+ >>> masks = s3tokenizer.make_non_pad_mask(lengths)
133
+ masks = [[1, 1, 1, 1, 1],
134
+ [1, 1, 1, 0, 0],
135
+ [1, 1, 0, 0, 0]]
136
+ >>> new_masks = s3tokenizer.mask_to_bias(masks, torch.float32)
137
+ new_masks = [[-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00],
138
+ [-0.0000e+00, -0.0000e+00, -0.0000e+00, -1.0000e+10, -1.0000e+10],
139
+ [-0.0000e+00, -0.0000e+00, -1.0000e+10, -1.0000e+10, -1.0000e+10]]
140
+ """
141
+ assert mask.dtype == torch.bool
142
+ assert dtype in [torch.float32, torch.bfloat16, torch.float16]
143
+ mask = mask.to(dtype)
144
+ # attention mask bias
145
+ # NOTE(Mddct): torch.finfo jit issues
146
+ # chunk_masks = (1.0 - chunk_masks) * torch.finfo(dtype).min
147
+ mask = (1.0 - mask) * -1.0e+10
148
+ return mask
149
+
150
+ class LayerNorm(nn.LayerNorm):
151
+ def forward(self, input: Tensor) -> Tensor:
152
+ return super().forward(input).type(input.dtype)
153
+
154
+ class Linear(nn.Linear):
155
+ def forward(self, input: Tensor) -> Tensor:
156
+ return F.linear(
157
+ input,
158
+ self.weight.to(input.dtype),
159
+ None if self.bias is None else self.bias.to(input.dtype),
160
+ )
161
+
162
+ class Conv1d(nn.Conv1d):
163
+ def _conv_forward(
164
+ self, input: Tensor, weight: Tensor, bias: Optional[Tensor]
165
+ ) -> Tensor:
166
+ return super()._conv_forward(
167
+ input, weight.to(input.dtype), None if bias is None else bias.to(input.dtype)
168
+ )
169
+
170
+ class MultiHeadAttention(nn.Module):
171
+ def __init__(self, n_state: int, n_head: int):
172
+ super().__init__()
173
+ self.n_head = n_head
174
+ self.query = Linear(n_state, n_state)
175
+ self.key = Linear(n_state, n_state, bias=False)
176
+ self.value = Linear(n_state, n_state)
177
+ self.out = Linear(n_state, n_state)
178
+
179
+ def forward(
180
+ self,
181
+ x: Tensor,
182
+ mask: Optional[Tensor] = None,
183
+ ):
184
+ q = self.query(x)
185
+ k = self.key(x)
186
+ v = self.value(x)
187
+
188
+ wv, qk = self.qkv_attention(q, k, v, mask)
189
+ return self.out(wv), qk
190
+
191
+ def qkv_attention(
192
+ self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None
193
+ ):
194
+ _, T, D = q.shape
195
+ scale = (D // self.n_head) ** -0.25
196
+ q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale
197
+ k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale
198
+ v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
199
+
200
+ qk = q @ k # (B, n_head, T, T)
201
+ if mask is not None:
202
+ qk = qk + mask
203
+ qk = qk.float()
204
+
205
+ w = F.softmax(qk, dim=-1).to(q.dtype)
206
+ return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2), qk.detach()
207
+
208
+ class ResidualAttentionBlock(nn.Module):
209
+ def __init__(self, n_state: int, n_head: int):
210
+ super().__init__()
211
+
212
+ self.attn = MultiHeadAttention(n_state, n_head)
213
+ self.attn_ln = LayerNorm(n_state)
214
+
215
+ n_mlp = n_state * 4
216
+ self.mlp = nn.Sequential(
217
+ Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state)
218
+ )
219
+ self.mlp_ln = LayerNorm(n_state)
220
+
221
+ def forward(
222
+ self,
223
+ x: Tensor,
224
+ mask: Optional[Tensor] = None,
225
+ ):
226
+ x = x + self.attn(self.attn_ln(x.contiguous()), mask=mask)[0]
227
+ x = x + self.mlp(self.mlp_ln(x.contiguous()))
228
+ return x
229
+
230
+ class AudioEncoder(nn.Module):
231
+ def __init__(
232
+ self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int
233
+ ):
234
+ super().__init__()
235
+ self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1)
236
+ self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
237
+ self.positional_embedding = nn.Embedding(n_ctx, n_state)
238
+ self.positional_embedding.requires_grad_(False)
239
+ self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
240
+ [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
241
+ )
242
+ self.avg_pooler = nn.AvgPool1d(2, stride=2)
243
+ self.after_norm = LayerNorm(n_state)
244
+ self.gradient_checkpointing = False
245
+
246
+ def forward(self, x: Tensor, x_len: Tensor) -> Tuple[Tensor, Tensor]:
247
+ T = x.size(-1)
248
+ x = F.gelu(self.conv1(x))
249
+ x = F.gelu(self.conv2(x))
250
+ x = x.permute(0, 2, 1) # (B, T // 2, n_state)
251
+ mask = make_non_pad_mask(x_len, T).unsqueeze(1) # (B, 1, T)
252
+ mask = mask_to_bias(mask[:, :, (T + 1) % 2::2], x.dtype) # (B, 1, T // 2)
253
+ x = (x + self.positional_embedding.weight[:x.shape[1], :]).to(x.dtype)
254
+ for block in self.blocks:
255
+ if self.gradient_checkpointing and self.training:
256
+ x = torch.utils.checkpoint.checkpoint(block, x, mask.unsqueeze(1))
257
+ else:
258
+ x = block(x, mask.unsqueeze(1))
259
+ x = x.permute(0, 2, 1)
260
+ x = self.avg_pooler(x)
261
+ x = x.permute(0, 2, 1)
262
+ x_len = (x_len + 1) // 2 // 2
263
+ x = self.after_norm(x.contiguous())
264
+ return x, x_len
265
+
266
+ class Adaptor(nn.Module):
267
+ def __init__(
268
+ self,
269
+ n_state: int = 1280,
270
+ n_hidden: int = 3072,
271
+ kernel_size: int = 7,
272
+ stride: int = 4
273
+ ):
274
+ super().__init__()
275
+ self.stride = stride
276
+ if self.stride != -1:
277
+ # print("self.stride: {}".format(self.stride))
278
+ self.conv = Conv1d(n_state, n_state, kernel_size, stride, padding=1)
279
+ self.linear1 = nn.Linear(n_state, 2048)
280
+ self.relu = nn.ReLU()
281
+ self.linear2 = nn.Linear(2048, n_hidden)
282
+ self.gradient_checkpointing = False
283
+
284
+ def forward(self, x: Tensor) -> Tuple[Tensor]:
285
+ T = x.size(-1)
286
+ if self.stride != -1:
287
+ if self.gradient_checkpointing and self.training:
288
+ x = torch.utils.checkpoint.checkpoint(self.conv, x.permute(0, 2, 1))
289
+ x = x.permute(0, 2, 1)
290
+ else:
291
+ x = x.permute(0, 2, 1)
292
+ x = F.gelu(self.conv(x))
293
+ x = x.permute(0, 2, 1)
294
+ if self.gradient_checkpointing and self.training:
295
+ x = torch.utils.checkpoint.checkpoint(self.linear1, x)
296
+ x = torch.utils.checkpoint.checkpoint(self.relu, x)
297
+ x = torch.utils.checkpoint.checkpoint(self.linear2, x)
298
+ else:
299
+ x = self.linear1(x)
300
+ x = self.relu(x)
301
+ x = self.linear2(x)
302
+ return x
303
+
304
+ class StepAudio2ForCausalLM(PreTrainedModel, GenerationMixin):
305
+ config_class = StepAudio2Config
306
+ main_input_name = "input_ids"
307
+ # Important: Add this attribute to make HF recognize it as a model with generation capability
308
+ # _keys_to_ignore_on_load_missing = ["lm_head.weight"]
309
+ supports_gradient_checkpointing = True # 新增,声明支持gradient checkpointing
310
+
311
+ def __init__(self, config: StepAudio2Config):
312
+ super().__init__(config)
313
+ if isinstance(config.torch_dtype, str):
314
+ dtype = getattr(torch, config.torch_dtype)
315
+ else:
316
+ dtype = config.torch_dtype
317
+ self.model = Qwen2Model(config.text_config)
318
+ self.bf16 = dtype==torch.bfloat16
319
+ self.encoder = AudioEncoder(
320
+ config.audio_encoder_config.n_mels, config.audio_encoder_config.n_audio_ctx, config.audio_encoder_config.n_audio_state,
321
+ config.audio_encoder_config.n_audio_head, config.audio_encoder_config.n_audio_layer
322
+ )
323
+ self.adapter = Adaptor(
324
+ config.audio_encoder_config.n_audio_state, config.audio_encoder_config.llm_dim,
325
+ config.audio_encoder_config.kernel_size, config.audio_encoder_config.adapter_stride
326
+ )
327
+ if self.bf16:
328
+ self.encoder = self.encoder.bfloat16()
329
+ self.adapter = self.adapter.bfloat16()
330
+ self.lm_head = torch.nn.Linear(
331
+ config.text_config.hidden_size,
332
+ config.text_config.vocab_size,
333
+ bias=False,
334
+ dtype=dtype
335
+ )
336
+ self.post_init()
337
+
338
+ def forward(
339
+ self,
340
+ input_ids=None,
341
+ wavs=None,
342
+ wav_lens=None,
343
+ attention_mask=None,
344
+ **kwargs
345
+ ):
346
+ hidden_states = self.model.embed_tokens(input_ids)
347
+ if wavs is not None:
348
+ if self.bf16:
349
+ wavs = wavs.bfloat16()
350
+ out, feat_lens = self.encoder(wavs, wav_lens)
351
+ out = self.adapter(out)
352
+ feat_lens = (feat_lens - 1) // 2 + 1
353
+ insert_location = torch.nonzero(input_ids == 151688)
354
+ insert_location[:,1] += 1
355
+ for idx in range(len(insert_location)):
356
+ i,s = insert_location[idx]
357
+ hidden_states[i][s : s+feat_lens[idx]] = out[idx][:feat_lens[idx]]
358
+
359
+ x = self.model(inputs_embeds=hidden_states, attention_mask=attention_mask)[0]
360
+ logits = self.lm_head(x)
361
+ return CausalLMOutputWithPast(
362
+ logits=logits,
363
+ past_key_values=None,
364
+ hidden_states=None,
365
+ attentions=None
366
+ )
367
+
368
+ def get_input_embeddings(self):
369
+ """Return the model's input embeddings - required for GenerationMixin"""
370
+ return self.model.embed_tokens
371
+
372
+ def get_output_embeddings(self):
373
+ """Return the model's output embeddings (LM head) - required for GenerationMixin"""
374
+ return self.lm_head
375
+
376
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
377
+ """Prepare inputs for generation - required for GenerationMixin"""
378
+ # Keep the wavs and wav_lens from the initial call
379
+ wavs = kwargs.get("wavs", None)
380
+ wav_lens = kwargs.get("wav_lens", None)
381
+
382
+ # For generation steps after the first, we don't need to process audio again
383
+ # because the audio tokens have already been replaced in the input sequence
384
+ if "past_key_values" in kwargs and kwargs["past_key_values"] is not None:
385
+ # We're in a generation step, no need to process audio again
386
+ return {
387
+ "input_ids": input_ids,
388
+ "attention_mask": attention_mask,
389
+ "past_key_values": kwargs.get("past_key_values")
390
+ }
391
+
392
+ # First generation step, include audio processing
393
+ return {
394
+ "input_ids": input_ids,
395
+ "attention_mask": attention_mask,
396
+ "wavs": wavs,
397
+ "wav_lens": wav_lens
398
+ }
399
+
400
+ def _reorder_cache(self, past_key_values, beam_idx):
401
+ """Reorder the cache for beam search - required for GenerationMixin if using beam search"""
402
+ # If you're not using past_key_values or beam search, this can be a simple pass-through
403
+ # Otherwise implement according to your model's cache structure
404
+ return past_key_values
405
+
406
+ def _set_gradient_checkpointing(self, module, value=False):
407
+ # For Qwen2Model
408
+ if hasattr(self.model, 'gradient_checkpointing'):
409
+ self.model.gradient_checkpointing = value
410
+
411
+ # Add the missing _gradient_checkpointing_func method to Qwen2Model
412
+ # This is what Qwen2Model tries to use when gradient_checkpointing=True
413
+ if value and not hasattr(self.model, '_gradient_checkpointing_func'):
414
+ def _gradient_checkpointing_func(module_to_run, *args, **kwargs):
415
+ # This function wraps torch.utils.checkpoint.checkpoint
416
+ # and is used by Qwen2Model to perform checkpointing
417
+ return torch.utils.checkpoint.checkpoint(module_to_run, *args, **kwargs)
418
+
419
+ self.model._gradient_checkpointing_func = _gradient_checkpointing_func
420
+
421
+ # For custom encoder and adapter
422
+ if hasattr(self.encoder, 'gradient_checkpointing'):
423
+ self.encoder.gradient_checkpointing = value
424
+ if hasattr(self.adapter, 'gradient_checkpointing'):
425
+ self.adapter.gradient_checkpointing = value
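For orientation (not part of the commit): a minimal usage sketch of the StepAudio2ForCausalLM class defined above. It assumes the checkpoint is loaded from this repo with trust_remote_code=True, that `wavs` already holds mel-spectrogram features with `n_mels` channels, and that a real audio prompt would contain the placeholder token (id 151688 in forward()) followed by enough reserved positions for the adapter output; the prompt construction here is an assumption, not taken from this file.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical usage sketch; the repo path comes from source.txt, preprocessing is assumed.
repo = "stepfun-ai/Step-Audio-2-mini-Think"
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True).eval()

# Mel features shaped (batch, n_mels, frames); wav_lens gives the unpadded frame counts.
# forward() casts wavs to bfloat16 itself when the checkpoint is bf16.
n_mels = model.config.audio_encoder_config.n_mels
wavs = torch.randn(1, n_mels, 3000)
wav_lens = torch.tensor([3000])

# A plain text prompt; an actual audio prompt must include token id 151688 so that
# forward() can splice the adapter output into the reserved positions after it.
input_ids = tokenizer("Transcribe the audio:", return_tensors="pt").input_ids

with torch.no_grad():
    out = model(input_ids=input_ids, wavs=wavs, wav_lens=wav_lens)
print(out.logits.shape)  # (batch, seq_len, vocab_size)
```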
models/Step-Audio-2-mini-Think/source.txt ADDED
@@ -0,0 +1 @@
1
+ https://huggingface.co/stepfun-ai/Step-Audio-2-mini-Think
models/Step-Audio-2-mini-Think/special_tokens_map.json ADDED
@@ -0,0 +1,235 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "<|EOT|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ {
11
+ "content": "<|BOT|>",
12
+ "lstrip": false,
13
+ "normalized": false,
14
+ "rstrip": false,
15
+ "single_word": false
16
+ },
17
+ {
18
+ "content": "<|CALL_START|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ {
25
+ "content": "<|CALL_END|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ },
31
+ {
32
+ "content": "<|THINK_START|>",
33
+ "lstrip": false,
34
+ "normalized": false,
35
+ "rstrip": false,
36
+ "single_word": false
37
+ },
38
+ {
39
+ "content": "<|THINK_END|>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false
44
+ },
45
+ {
46
+ "content": "<|IMG_START|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false
51
+ },
52
+ {
53
+ "content": "<|IMG_END|>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false
58
+ },
59
+ {
60
+ "content": "<|META_START|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false
65
+ },
66
+ {
67
+ "content": "<|META_END|>",
68
+ "lstrip": false,
69
+ "normalized": false,
70
+ "rstrip": false,
71
+ "single_word": false
72
+ },
73
+ {
74
+ "content": "<im_patch>",
75
+ "lstrip": false,
76
+ "normalized": false,
77
+ "rstrip": false,
78
+ "single_word": false
79
+ },
80
+ {
81
+ "content": "<im_start>",
82
+ "lstrip": false,
83
+ "normalized": false,
84
+ "rstrip": false,
85
+ "single_word": false
86
+ },
87
+ {
88
+ "content": "<im_end>",
89
+ "lstrip": false,
90
+ "normalized": false,
91
+ "rstrip": false,
92
+ "single_word": false
93
+ },
94
+ {
95
+ "content": "<dream>",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": false,
99
+ "single_word": false
100
+ },
101
+ {
102
+ "content": "<dream_start>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false
107
+ },
108
+ {
109
+ "content": "<dream_end>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false
114
+ },
115
+ {
116
+ "content": "<|MASK_1e69f|>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false
121
+ },
122
+ {
123
+ "content": "<|UNMASK_1e69f|>",
124
+ "lstrip": false,
125
+ "normalized": false,
126
+ "rstrip": false,
127
+ "single_word": false
128
+ },
129
+ {
130
+ "content": "<video_start>",
131
+ "lstrip": false,
132
+ "normalized": false,
133
+ "rstrip": false,
134
+ "single_word": false
135
+ },
136
+ {
137
+ "content": "<video_end>",
138
+ "lstrip": false,
139
+ "normalized": false,
140
+ "rstrip": false,
141
+ "single_word": false
142
+ },
143
+ {
144
+ "content": "<patch_start>",
145
+ "lstrip": false,
146
+ "normalized": false,
147
+ "rstrip": false,
148
+ "single_word": false
149
+ },
150
+ {
151
+ "content": "<patch_end>",
152
+ "lstrip": false,
153
+ "normalized": false,
154
+ "rstrip": false,
155
+ "single_word": false
156
+ },
157
+ {
158
+ "content": "<patch_newline>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false
163
+ },
164
+ {
165
+ "content": "<audio_start>",
166
+ "lstrip": false,
167
+ "normalized": false,
168
+ "rstrip": false,
169
+ "single_word": false
170
+ },
171
+ {
172
+ "content": "<audio_end>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false
177
+ },
178
+ {
179
+ "content": "<audio_patch>",
180
+ "lstrip": false,
181
+ "normalized": false,
182
+ "rstrip": false,
183
+ "single_word": false
184
+ },
185
+ {
186
+ "content": "<audio_patch_pad>",
187
+ "lstrip": false,
188
+ "normalized": false,
189
+ "rstrip": false,
190
+ "single_word": false
191
+ },
192
+ {
193
+ "content": "<|SC|>",
194
+ "lstrip": false,
195
+ "normalized": false,
196
+ "rstrip": false,
197
+ "single_word": false
198
+ },
199
+ {
200
+ "content": "<tts_start>",
201
+ "lstrip": false,
202
+ "normalized": false,
203
+ "rstrip": false,
204
+ "single_word": false
205
+ },
206
+ {
207
+ "content": "<tts_end>",
208
+ "lstrip": false,
209
+ "normalized": false,
210
+ "rstrip": false,
211
+ "single_word": false
212
+ },
213
+ {
214
+ "content": "<tts_pad>",
215
+ "lstrip": false,
216
+ "normalized": false,
217
+ "rstrip": false,
218
+ "single_word": false
219
+ }
220
+ ],
221
+ "eos_token": {
222
+ "content": "<|endoftext|>",
223
+ "lstrip": false,
224
+ "normalized": false,
225
+ "rstrip": false,
226
+ "single_word": false
227
+ },
228
+ "pad_token": {
229
+ "content": "<|endoftext|>",
230
+ "lstrip": false,
231
+ "normalized": false,
232
+ "rstrip": false,
233
+ "single_word": false
234
+ }
235
+ }
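The map above registers the audio and TTS control tokens (e.g. <audio_start>, <audio_patch>, <tts_start>) as additional special tokens so the tokenizer never splits them. A small sketch using the standard tokenizers API (repo path from source.txt) to confirm they resolve to single ids:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("stepfun-ai/Step-Audio-2-mini-Think", trust_remote_code=True)

# Each additional special token from special_tokens_map.json maps to exactly one id
# and is protected from BPE splitting.
for t in ("<audio_start>", "<audio_patch>", "<audio_end>", "<tts_start>", "<tts_end>"):
    print(t, tok.convert_tokens_to_ids(t))
```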
models/Step-Audio-2-mini-Think/token2wav/campplus.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6ac6a63997761ae2997373e2ee1c47040854b4b759ea41ec48e4e42df0f4d73
3
+ size 28303423
models/Step-Audio-2-mini-Think/token2wav/flow.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15ccff24256ff61537c7f8b51e025116b83405f3fb017b54b008fc97da115446
3
+ size 623466603
models/Step-Audio-2-mini-Think/token2wav/flow.yaml ADDED
@@ -0,0 +1,34 @@
1
+ flow: !new:cosyvoice2.flow.flow.CausalMaskedDiffWithXvec
2
+ input_size: 512
3
+ output_size: 80
4
+ spk_embed_dim: 192
5
+ output_type: 'mel'
6
+ vocab_size: 6561
7
+ encoder: !new:cosyvoice2.transformer.upsample_encoder_v2.UpsampleConformerEncoderV2
8
+ input_size: 512
9
+ output_size: 512
10
+ input_layer: 'linear'
11
+ pre_lookahead_len: 3
12
+ num_blocks: 6
13
+ num_up_blocks: 4
14
+ up_stride: 2
15
+ up_scale_factor: 2
16
+ attention_heads: 8
17
+ pos_enc_layer_type: 'rel_pos_espnet'
18
+ selfattention_layer_type: 'rel_selfattn'
19
+ key_bias: true
20
+ linear_units: 2048
21
+ dropout_rate: 0.1
22
+ positional_dropout_rate: 0.1
23
+ attention_dropout_rate: 0.1
24
+ normalize_before: True
25
+ decoder: !new:cosyvoice2.flow.flow_matching.CausalConditionalCFM
26
+ inference_cfg_rate: 0.7
27
+ estimator: !new:cosyvoice2.flow.decoder_dit.DiT
28
+ in_channels: 320
29
+ out_channels: 80
30
+ mlp_ratio: 4.0
31
+ depth: 16
32
+ num_heads: 8
33
+ head_dim: 64
34
+ hidden_size: 512
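The `!new:` tags in flow.yaml follow the HyperPyYAML convention used by CosyVoice, so the token2wav flow model can be instantiated directly from this file. A hedged sketch, assuming the `cosyvoice2` package referenced by the tags is importable and that flow.pt stores a bare state_dict:

```python
import torch
from hyperpyyaml import load_hyperpyyaml

# Assumption: the cosyvoice2 modules named by the !new: tags are on PYTHONPATH.
with open("models/Step-Audio-2-mini-Think/token2wav/flow.yaml") as f:
    cfg = load_hyperpyyaml(f)

flow = cfg["flow"]  # CausalMaskedDiffWithXvec instance built from the YAML
state = torch.load("models/Step-Audio-2-mini-Think/token2wav/flow.pt", map_location="cpu")
flow.load_state_dict(state)  # assumes the checkpoint is a plain state_dict
flow.eval()
```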
models/Step-Audio-2-mini-Think/token2wav/hift.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3386cc880324d4e98e05987b99107f49e40ed925b8ecc87c1f4939432d429879
3
+ size 83390254
models/Step-Audio-2-mini-Think/token2wav/speech_tokenizer_v2_25hz.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d43342aa12163a80bf07bffb94c9de2e120a8df2f9917cd2f642e7f4219c6f71
3
+ size 496082973
models/Step-Audio-2-mini-Think/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
models/Step-Audio-2-mini-Think/tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
models/Step-Audio-2-mini-Think/vocab.json ADDED
The diff for this file is too large to render. See raw diff