Step-Audio (models)
- .gitattributes +3 -0
- models/Step-Audio-2-mini/.gitattributes +39 -0
- models/Step-Audio-2-mini/README.md +875 -0
- models/Step-Audio-2-mini/added_tokens.json +0 -0
- models/Step-Audio-2-mini/assets/architecture5.png +3 -0
- models/Step-Audio-2-mini/assets/arxiv.svg +1 -0
- models/Step-Audio-2-mini/assets/logo.png +0 -0
- models/Step-Audio-2-mini/assets/qrcode.jpg +0 -0
- models/Step-Audio-2-mini/assets/radar.png +3 -0
- models/Step-Audio-2-mini/assets/wechat_group.png +0 -0
- models/Step-Audio-2-mini/config.json +38 -0
- models/Step-Audio-2-mini/configuration_step_audio_2.py +128 -0
- models/Step-Audio-2-mini/merges.txt +0 -0
- models/Step-Audio-2-mini/model-00001-of-00004.safetensors +3 -0
- models/Step-Audio-2-mini/model-00002-of-00004.safetensors +3 -0
- models/Step-Audio-2-mini/model-00003-of-00004.safetensors +3 -0
- models/Step-Audio-2-mini/model-00004-of-00004.safetensors +3 -0
- models/Step-Audio-2-mini/model.safetensors.index.json +839 -0
- models/Step-Audio-2-mini/modeling_step_audio_2.py +425 -0
- models/Step-Audio-2-mini/source.txt +1 -0
- models/Step-Audio-2-mini/special_tokens_map.json +43 -0
- models/Step-Audio-2-mini/token2wav/campplus.onnx +3 -0
- models/Step-Audio-2-mini/token2wav/flow.pt +3 -0
- models/Step-Audio-2-mini/token2wav/flow.yaml +34 -0
- models/Step-Audio-2-mini/token2wav/hift.pt +3 -0
- models/Step-Audio-2-mini/token2wav/speech_tokenizer_v2_25hz.onnx +3 -0
- models/Step-Audio-2-mini/tokenizer.json +3 -0
- models/Step-Audio-2-mini/tokenizer_config.json +0 -0
- models/Step-Audio-2-mini/vocab.json +0 -0
.gitattributes
CHANGED

@@ -36,3 +36,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 Step-Audio-AQAA.[[:space:]]A[[:space:]]Fully[[:space:]]End-to-End[[:space:]]Expressive[[:space:]]Large[[:space:]]Audio[[:space:]]Language[[:space:]]Model.pdf filter=lfs diff=lfs merge=lfs -text
 Step-Audio-EditX[[:space:]]Technical[[:space:]]Report.pdf filter=lfs diff=lfs merge=lfs -text
 Step-Audio.[[:space:]]Unified[[:space:]]Understanding[[:space:]]and[[:space:]]Generation[[:space:]]in[[:space:]]Intelligent[[:space:]]Speech[[:space:]]Interaction.pdf filter=lfs diff=lfs merge=lfs -text
+models/Step-Audio-2-mini/assets/architecture5.png filter=lfs diff=lfs merge=lfs -text
+models/Step-Audio-2-mini/assets/radar.png filter=lfs diff=lfs merge=lfs -text
+models/Step-Audio-2-mini/tokenizer.json filter=lfs diff=lfs merge=lfs -text
models/Step-Audio-2-mini/.gitattributes
ADDED

@@ -0,0 +1,39 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
assets/architecture5.png filter=lfs diff=lfs merge=lfs -text
assets/radar.png filter=lfs diff=lfs merge=lfs -text
assets/wechat_group.jpg filter=lfs diff=lfs merge=lfs -text
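
Every entry above follows the pattern that `git lfs track` writes into `.gitattributes`: a path glob plus the LFS filter attributes. For example (illustrative, not part of this commit):

```bash
# Tracking a new pattern appends a matching line to .gitattributes:
git lfs track "*.wav"
# -> *.wav filter=lfs diff=lfs merge=lfs -text
```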
models/Step-Audio-2-mini/README.md
ADDED

@@ -0,0 +1,875 @@
---
license: apache-2.0
library_name: transformers
pipeline_tag: any-to-any
language:
- en
- zh
---

<div align="center">
  <img src="assets/logo.png" height=100>
</div>

<div align="center" style="line-height: 1;">
  <a href="https://github.com/stepfun-ai/Step-Audio2" target="_blank"><img alt="GitHub" src="https://img.shields.io/badge/GitHub-StepFun-white?logo=github&logoColor=white"/></a>
  <a href="https://www.stepfun.com/docs/en/step-audio2" target="_blank"><img alt="Homepage" src="https://img.shields.io/badge/Homepage-StepFun-white?logo=StepFun&logoColor=white"/></a>
  <a href="https://x.com/StepFun_ai" target="_blank"><img alt="Twitter Follow" src="https://img.shields.io/badge/Twitter-StepFun-white?logo=x&logoColor=white"/></a>
  <a href="https://discord.com/invite/XHheP5Fn" target="_blank"><img alt="Discord" src="https://img.shields.io/badge/Discord-StepFun-white?logo=discord&logoColor=white"/></a>
</div>
<div align="center">
  <a href="https://huggingface.co/stepfun-ai/Step-Audio-2-mini"><img src="https://img.shields.io/static/v1?label=Step-Audio-2-mini&message=HuggingFace&color=yellow"></a>
  <a href="https://huggingface.co/stepfun-ai/Step-Audio-2-mini-Base"><img src="https://img.shields.io/static/v1?label=Step-Audio-2-mini-Base&message=HuggingFace&color=yellow"></a>
</div>
<div align="center">
  <a href="https://arxiv.org/abs/2507.16632"><img src="assets/arxiv.svg"></a>
  <a href="https://github.com/stepfun-ai/Step-Audio2/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/badge/License-Apache%202.0-blue?&color=blue"/></a>
</div>

## Introduction

Step-Audio 2 is an end-to-end multi-modal large language model designed for industry-strength audio understanding and speech conversation, presented in the paper [Step-Audio 2 Technical Report](https://huggingface.co/papers/2507.16632).

- **Advanced Speech and Audio Understanding**: Promising performance in ASR and audio understanding, comprehending and reasoning over semantic, paralinguistic, and non-vocal information.

- **Intelligent Speech Conversation**: Natural and intelligent interactions that are contextually appropriate for various conversational scenarios and responsive to paralinguistic cues.

- **Tool Calling and Multimodal RAG**: By leveraging tool calling and RAG to access real-world knowledge (both textual and acoustic), Step-Audio 2 generates responses with fewer hallucinations across diverse scenarios, and can also switch timbres based on retrieved speech.

- **State-of-the-Art Performance**: State-of-the-art results on various audio understanding and conversational benchmarks compared with other open-source and commercial solutions (see [Evaluation](#evaluation) and the [Technical Report](https://huggingface.co/papers/2507.16632)).

- **Open-source**: [Step-Audio 2 mini](https://huggingface.co/stepfun-ai/Step-Audio-2-mini) and [Step-Audio 2 mini Base](https://huggingface.co/stepfun-ai/Step-Audio-2-mini-Base) are released under the [Apache 2.0](LICENSE) license.

## Model Download
### Hugging Face
| Models | 🤗 Hugging Face |
|-------|-------|
| Step-Audio 2 mini | [stepfun-ai/Step-Audio-2-mini](https://huggingface.co/stepfun-ai/Step-Audio-2-mini) |
| Step-Audio 2 mini Base | [stepfun-ai/Step-Audio-2-mini-Base](https://huggingface.co/stepfun-ai/Step-Audio-2-mini-Base) |

<!-- ### Modelscope
| Models | Links |
|-------|-------|
| Step-Audio-2-mini | [modelscope](https://modelscope.cn/models/stepfun-ai/Step-Audio-2-mini) |
| Step-Audio-2-mini-Base | [modelscope](https://modelscope.cn/models/stepfun-ai/Step-Audio-2-mini-Base) | -->

## Model Usage
### 🔧 Dependencies and Installation
- Python >= 3.10
- [PyTorch >= 2.3-cu121](https://pytorch.org/)
- [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads)

```bash
conda create -n stepaudio2 python=3.10
conda activate stepaudio2
pip install transformers==4.49.0 torchaudio librosa onnxruntime s3tokenizer diffusers hyperpyyaml

git clone https://github.com/stepfun-ai/Step-Audio2.git
cd Step-Audio2
git lfs install
git clone https://huggingface.co/stepfun-ai/Step-Audio-2-mini
```

### 🚀 Inference Scripts

```bash
python examples.py
```
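
If you would rather drive the checkpoint directly than go through `examples.py`, a minimal loading sketch follows. It only shows loading the weights via the `auto_map` wiring in `config.json`; the audio tokenization and `token2wav` synthesis stages that `examples.py` performs are not shown.

```python
# Minimal loading sketch (illustrative, not the full audio pipeline).
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Step-Audio-2-mini",         # path to the local clone from the step above
    torch_dtype=torch.bfloat16,  # the checkpoint is shipped in bfloat16
    device_map="cuda",
    trust_remote_code=True,      # required: routes to modeling_step_audio_2.py
)
model.eval()
```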

### 🚀 Local web demonstration

```bash
pip install gradio
python web_demo.py
```

## Online demonstration

### StepFun realtime console

- Both Step-Audio 2 and Step-Audio 2 mini are available in our [StepFun realtime console](https://realtime-console.stepfun.com/) with the web search tool enabled.
- You will need an API key from the [StepFun Open Platform](https://platform.stepfun.com/).

### StepFun AI Assistant

- Step-Audio 2 is also available in our StepFun AI Assistant mobile app, with both web and audio search tools enabled.
- Please scan the following QR code to download it from your app store, then tap the phone icon in the top-right corner.

<div align="center">
  <img src="./assets/qrcode.jpg" width="200" alt="QR code">
</div>

## WeChat group

You can scan the following QR code to join our WeChat group for communication and discussion.
<div align="center">
  <img src="./assets/wechat_group.png" width="200" alt="QR code">
</div>

## Evaluation
<div align="center">
  <img src="assets/radar.png" alt="Benchmark radar chart" width="600" />
</div>

### Automatic speech recognition
CER is reported for Chinese, Cantonese, and Japanese; WER for Arabic and English. N/A indicates that the language is not supported.

<table border="1" cellpadding="5" cellspacing="0" align="center">
<thead>
<tr><th>Category</th><th>Test set</th><th>Doubao LLM ASR</th><th>GPT-4o Transcribe</th><th>Kimi-Audio</th><th>Qwen-Omni</th><th>Step-Audio 2</th><th>Step-Audio 2 mini</th></tr>
</thead>
<tbody>
<tr><td rowspan="5" align="center"><strong>English</strong></td><td align="left">Common Voice</td><td align="center">9.20</td><td align="center">9.30</td><td align="center">7.83</td><td align="center">8.33</td><td align="center"><strong>5.95</strong></td><td align="center">6.76</td></tr>
<tr><td align="left">FLEURS English</td><td align="center">7.22</td><td align="center"><strong>2.71</strong></td><td align="center">4.47</td><td align="center">5.05</td><td align="center">3.03</td><td align="center">3.05</td></tr>
<tr><td align="left">LibriSpeech clean</td><td align="center">2.92</td><td align="center">1.75</td><td align="center">1.49</td><td align="center">2.93</td><td align="center"><strong>1.17</strong></td><td align="center">1.33</td></tr>
<tr><td align="left">LibriSpeech other</td><td align="center">5.32</td><td align="center">4.23</td><td align="center">2.91</td><td align="center">5.07</td><td align="center"><strong>2.42</strong></td><td align="center">2.86</td></tr>
<tr><td align="left"><strong>Average</strong></td><td align="center">6.17</td><td align="center">4.50</td><td align="center">4.18</td><td align="center">5.35</td><td align="center"><strong>3.14</strong></td><td align="center">3.50</td></tr>
<tr><td rowspan="7" align="center"><strong>Chinese</strong></td><td align="left">AISHELL</td><td align="center">0.98</td><td align="center">3.52</td><td align="center">0.64</td><td align="center">1.17</td><td align="center"><strong>0.63</strong></td><td align="center">0.78</td></tr>
<tr><td align="left">AISHELL-2</td><td align="center">3.10</td><td align="center">4.26</td><td align="center">2.67</td><td align="center">2.40</td><td align="center"><strong>2.10</strong></td><td align="center">2.16</td></tr>
<tr><td align="left">FLEURS Chinese</td><td align="center">2.92</td><td align="center">2.62</td><td align="center">2.91</td><td align="center">7.01</td><td align="center">2.68</td><td align="center"><strong>2.53</strong></td></tr>
<tr><td align="left">KeSpeech phase1</td><td align="center">6.48</td><td align="center">26.80</td><td align="center">5.11</td><td align="center">6.45</td><td align="center"><strong>3.63</strong></td><td align="center">3.97</td></tr>
<tr><td align="left">WenetSpeech meeting</td><td align="center">4.90</td><td align="center">31.40</td><td align="center">5.21</td><td align="center">6.61</td><td align="center"><strong>4.75</strong></td><td align="center">4.87</td></tr>
<tr><td align="left">WenetSpeech net</td><td align="center"><strong>4.46</strong></td><td align="center">15.71</td><td align="center">5.93</td><td align="center">5.24</td><td align="center">4.67</td><td align="center">4.82</td></tr>
<tr><td align="left"><strong>Average</strong></td><td align="center">3.81</td><td align="center">14.05</td><td align="center">3.75</td><td align="center">4.81</td><td align="center"><strong>3.08</strong></td><td align="center">3.19</td></tr>
<tr><td rowspan="3" align="center"><strong>Multilingual</strong></td><td align="left">FLEURS Arabic</td><td align="center">N/A</td><td align="center"><strong>11.72</strong></td><td align="center">N/A</td><td align="center">25.13</td><td align="center">14.22</td><td align="center">16.46</td></tr>
<tr><td align="left">Common Voice yue</td><td align="center">9.20</td><td align="center">11.10</td><td align="center">38.90</td><td align="center"><strong>7.89</strong></td><td align="center">7.90</td><td align="center">8.32</td></tr>
<tr><td align="left">FLEURS Japanese</td><td align="center">N/A</td><td align="center"><strong>3.27</strong></td><td align="center">N/A</td><td align="center">10.49</td><td align="center">3.18</td><td align="center">4.67</td></tr>
<tr><td rowspan="7" align="center"><strong>In-house</strong></td><td align="left">Anhui accent</td><td align="center"><strong>8.83</strong></td><td align="center">50.55</td><td align="center">22.17</td><td align="center">18.73</td><td align="center">10.61</td><td align="center">11.65</td></tr>
<tr><td align="left">Guangdong accent</td><td align="center">4.99</td><td align="center">7.83</td><td align="center"><strong>3.76</strong></td><td align="center">4.03</td><td align="center">3.81</td><td align="center">4.44</td></tr>
<tr><td align="left">Guangxi accent</td><td align="center">3.37</td><td align="center">7.09</td><td align="center">4.29</td><td align="center"><strong>3.35</strong></td><td align="center">4.11</td><td align="center">3.51</td></tr>
<tr><td align="left">Shanxi accent</td><td align="center">20.26</td><td align="center">55.03</td><td align="center">34.71</td><td align="center">25.95</td><td align="center"><strong>12.44</strong></td><td align="center">15.60</td></tr>
<tr><td align="left">Sichuan dialect</td><td align="center"><strong>3.01</strong></td><td align="center">32.85</td><td align="center">5.26</td><td align="center">5.61</td><td align="center">4.35</td><td align="center">4.57</td></tr>
<tr><td align="left">Shanghai dialect</td><td align="center">47.49</td><td align="center">89.58</td><td align="center">82.90</td><td align="center">58.74</td><td align="center"><strong>17.77</strong></td><td align="center">19.30</td></tr>
<tr><td align="left"><strong>Average</strong></td><td align="center">14.66</td><td align="center">40.49</td><td align="center">25.52</td><td align="center">19.40</td><td align="center"><strong>8.85</strong></td><td align="center">9.85</td></tr>
</tbody>
</table>
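
The WER/CER numbers above are ordinary edit-distance rates: (substitutions + deletions + insertions) divided by the reference length, computed over words for WER and over characters for CER. A minimal illustrative implementation (not the evaluation script used for this table):

```python
def edit_distance(ref, hyp):
    """Levenshtein distance between two token sequences (one-row DP)."""
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        diag, dp[0] = dp[0], i
        for j, h in enumerate(hyp, 1):
            diag, dp[j] = dp[j], min(
                dp[j] + 1,        # deletion
                dp[j - 1] + 1,    # insertion
                diag + (r != h),  # substitution, free on a match
            )
    return dp[-1]

def error_rate(ref, hyp):
    return edit_distance(ref, hyp) / len(ref)

print(error_rate("i like cats".split(), "i like black cats".split()))  # WER ~0.33 (1 insertion / 3 words)
print(error_rate(list("今天天气"), list("今天天汽")))                  # CER 0.25 (1 substitution / 4 chars)
```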

### Paralinguistic information understanding
StepEval-Audio-Paralinguistic
<table border="1" cellpadding="5" cellspacing="0" align="center">
<thead>
<tr><th>Model</th><th>Avg.</th><th>Gender</th><th>Age</th><th>Timbre</th><th>Scenario</th><th>Event</th><th>Emotion</th><th>Pitch</th><th>Rhythm</th><th>Speed</th><th>Style</th><th>Vocal</th></tr>
</thead>
<tbody>
<tr><td align="left"><strong>GPT-4o Audio</strong></td><td align="center">43.45</td><td align="center">18</td><td align="center">42</td><td align="center">34</td><td align="center">22</td><td align="center">14</td><td align="center">82</td><td align="center">40</td><td align="center">60</td><td align="center">58</td><td align="center">64</td><td align="center">44</td></tr>
<tr><td align="left"><strong>Kimi-Audio</strong></td><td align="center">49.64</td><td align="center">94</td><td align="center">50</td><td align="center">10</td><td align="center">30</td><td align="center">48</td><td align="center">66</td><td align="center">56</td><td align="center">40</td><td align="center">44</td><td align="center">54</td><td align="center">54</td></tr>
<tr><td align="left"><strong>Qwen-Omni</strong></td><td align="center">44.18</td><td align="center">40</td><td align="center">50</td><td align="center">16</td><td align="center">28</td><td align="center">42</td><td align="center">76</td><td align="center">32</td><td align="center">54</td><td align="center">50</td><td align="center">50</td><td align="center">48</td></tr>
<tr><td align="left"><strong>Step-Audio-AQAA</strong></td><td align="center">36.91</td><td align="center">70</td><td align="center">66</td><td align="center">18</td><td align="center">14</td><td align="center">14</td><td align="center">40</td><td align="center">38</td><td align="center">48</td><td align="center">54</td><td align="center">44</td><td align="center">0</td></tr>
<tr><td align="left"><strong>Step-Audio 2</strong></td><td align="center"><strong>83.09</strong></td><td align="center"><strong>100</strong></td><td align="center"><strong>96</strong></td><td align="center"><strong>82</strong></td><td align="center"><strong>78</strong></td><td align="center"><strong>60</strong></td><td align="center"><strong>86</strong></td><td align="center"><strong>82</strong></td><td align="center"><strong>86</strong></td><td align="center"><strong>88</strong></td><td align="center"><strong>88</strong></td><td align="center">68</td></tr>
<tr><td align="left"><strong>Step-Audio 2 mini</strong></td><td align="center">80.00</td><td align="center"><strong>100</strong></td><td align="center">94</td><td align="center">80</td><td align="center"><strong>78</strong></td><td align="center"><strong>60</strong></td><td align="center">82</td><td align="center"><strong>82</strong></td><td align="center">68</td><td align="center">74</td><td align="center">86</td><td align="center"><strong>76</strong></td></tr>
</tbody>
</table>

### Audio understanding and reasoning
MMAU
<table border="1" cellpadding="5" cellspacing="0" align="center">
<thead>
<tr><th>Model</th><th>Avg.</th><th>Sound</th><th>Speech</th><th>Music</th></tr>
</thead>
<tbody>
<tr><td align="left"><strong>Audio Flamingo 3</strong></td><td align="center">73.1</td><td align="center">76.9</td><td align="center">66.1</td><td align="center"><strong>73.9</strong></td></tr>
<tr><td align="left"><strong>Gemini 2.5 Pro</strong></td><td align="center">71.6</td><td align="center">75.1</td><td align="center">71.5</td><td align="center">68.3</td></tr>
<tr><td align="left"><strong>GPT-4o Audio</strong></td><td align="center">58.1</td><td align="center">58.0</td><td align="center">64.6</td><td align="center">51.8</td></tr>
<tr><td align="left"><strong>Kimi-Audio</strong></td><td align="center">69.6</td><td align="center">79.0</td><td align="center">65.5</td><td align="center">64.4</td></tr>
<tr><td align="left"><strong>Omni-R1</strong></td><td align="center">77.0</td><td align="center">81.7</td><td align="center">76.0</td><td align="center">73.4</td></tr>
<tr><td align="left"><strong>Qwen2.5-Omni</strong></td><td align="center">71.5</td><td align="center">78.1</td><td align="center">70.6</td><td align="center">65.9</td></tr>
<tr><td align="left"><strong>Step-Audio-AQAA</strong></td><td align="center">49.7</td><td align="center">50.5</td><td align="center">51.4</td><td align="center">47.3</td></tr>
<tr><td align="left"><strong>Step-Audio 2</strong></td><td align="center"><strong>78.0</strong></td><td align="center"><strong>83.5</strong></td><td align="center"><strong>76.9</strong></td><td align="center">73.7</td></tr>
<tr><td align="left"><strong>Step-Audio 2 mini</strong></td><td align="center">73.2</td><td align="center">76.6</td><td align="center">71.5</td><td align="center">71.6</td></tr>
</tbody>
</table>

### Speech translation

<table border="1" cellpadding="5" cellspacing="0" align="center">
<thead>
<tr><th rowspan="2">Model</th><th colspan="3">CoVoST 2 (S2TT)</th></tr>
<tr><th>Avg.</th><th>English-to-Chinese</th><th>Chinese-to-English</th></tr>
</thead>
<tbody>
<tr><td align="left"><strong>GPT-4o Audio</strong></td><td align="center">29.61</td><td align="center">40.20</td><td align="center">19.01</td></tr>
<tr><td align="left"><strong>Qwen2.5-Omni</strong></td><td align="center">35.40</td><td align="center">41.40</td><td align="center">29.40</td></tr>
<tr><td align="left"><strong>Step-Audio-AQAA</strong></td><td align="center">28.57</td><td align="center">37.71</td><td align="center">19.43</td></tr>
<tr><td align="left"><strong>Step-Audio 2</strong></td><td align="center">39.26</td><td align="center">49.01</td><td align="center"><strong>29.51</strong></td></tr>
<tr><td align="left"><strong>Step-Audio 2 mini</strong></td><td align="center"><strong>39.29</strong></td><td align="center"><strong>49.12</strong></td><td align="center">29.47</td></tr>
</tbody>
</table>

<table border="1" cellpadding="5" cellspacing="0" align="center">
<thead>
<tr><th rowspan="2">Model</th><th colspan="3">CVSS (S2ST)</th></tr>
<tr><th>Avg.</th><th>English-to-Chinese</th><th>Chinese-to-English</th></tr>
</thead>
<tbody>
<tr><td align="left"><strong>GPT-4o Audio</strong></td><td align="center">23.68</td><td align="center">20.07</td><td align="center"><strong>27.29</strong></td></tr>
<tr><td align="left"><strong>Qwen-Omni</strong></td><td align="center">15.35</td><td align="center">8.04</td><td align="center">22.66</td></tr>
<tr><td align="left"><strong>Step-Audio-AQAA</strong></td><td align="center">27.36</td><td align="center">30.74</td><td align="center">23.98</td></tr>
<tr><td align="left"><strong>Step-Audio 2</strong></td><td align="center"><strong>30.87</strong></td><td align="center"><strong>34.83</strong></td><td align="center">26.92</td></tr>
<tr><td align="left"><strong>Step-Audio 2 mini</strong></td><td align="center">29.08</td><td align="center">32.81</td><td align="center">25.35</td></tr>
</tbody>
</table>

### Tool calling
StepEval-Audio-Toolcall. The date and time tools take no parameters.
<table border="1" cellpadding="5" cellspacing="0" align="center">
<thead>
<tr><th>Model</th><th>Objective</th><th>Metric</th><th>Audio search</th><th>Date & Time</th><th>Weather</th><th>Web search</th></tr>
</thead>
<tbody>
<tr><td rowspan="3" align="center"><strong>Qwen3-32B</strong><sup>†</sup></td><td align="center"><strong>Trigger</strong></td><td align="center"><strong>Precision / Recall</strong></td><td align="center">67.5 / 98.5</td><td align="center">98.4 / 100.0</td><td align="center">90.1 / 100.0</td><td align="center">86.8 / 98.5</td></tr>
<tr><td align="center"><strong>Type</strong></td><td align="center"><strong>Accuracy</strong></td><td align="center">100.0</td><td align="center">100.0</td><td align="center">98.5</td><td align="center">98.5</td></tr>
<tr><td align="center"><strong>Parameter</strong></td><td align="center"><strong>Accuracy</strong></td><td align="center">100.0</td><td align="center">N/A</td><td align="center">100.0</td><td align="center">100.0</td></tr>
<tr><td rowspan="3" align="center"><strong>Step-Audio 2</strong></td><td align="center"><strong>Trigger</strong></td><td align="center"><strong>Precision / Recall</strong></td><td align="center">86.8 / 99.5</td><td align="center">96.9 / 98.4</td><td align="center">92.2 / 100.0</td><td align="center">88.4 / 95.5</td></tr>
<tr><td align="center"><strong>Type</strong></td><td align="center"><strong>Accuracy</strong></td><td align="center">100.0</td><td align="center">100.0</td><td align="center">90.5</td><td align="center">98.4</td></tr>
<tr><td align="center"><strong>Parameter</strong></td><td align="center"><strong>Accuracy</strong></td><td align="center">100.0</td><td align="center">N/A</td><td align="center">100.0</td><td align="center">100.0</td></tr>
</tbody>
</table>

### Speech-to-speech conversation
URO-Bench. U., R., and O. stand for understanding, reasoning, and oral conversation, respectively.

<table border="1" cellpadding="5" cellspacing="0" align="center">
<thead>
<tr><th rowspan="2">Model</th><th rowspan="2">Language</th><th colspan="4">Basic</th><th colspan="4">Pro</th></tr>
<tr><th>Avg.</th><th>U.</th><th>R.</th><th>O.</th><th>Avg.</th><th>U.</th><th>R.</th><th>O.</th></tr>
</thead>
<tbody>
<tr><td align="left"><strong>GPT-4o Audio</strong></td><td rowspan="6" align="center"><strong>Chinese</strong></td><td align="center">78.59</td><td align="center">89.40</td><td align="center">65.48</td><td align="center">85.24</td><td align="center">67.10</td><td align="center">70.60</td><td align="center">57.22</td><td align="center">70.20</td></tr>
<tr><td align="left"><strong>Kimi-Audio</strong></td><td align="center">73.59</td><td align="center">79.34</td><td align="center">64.66</td><td align="center">79.75</td><td align="center">66.07</td><td align="center">60.44</td><td align="center">59.29</td><td align="center"><strong>76.21</strong></td></tr>
<tr><td align="left"><strong>Qwen-Omni</strong></td><td align="center">68.98</td><td align="center">59.66</td><td align="center">69.74</td><td align="center">77.27</td><td align="center">59.11</td><td align="center">59.01</td><td align="center">59.82</td><td align="center">58.74</td></tr>
<tr><td align="left"><strong>Step-Audio-AQAA</strong></td><td align="center">74.71</td><td align="center">87.61</td><td align="center">59.63</td><td align="center">81.93</td><td align="center">65.61</td><td align="center">74.76</td><td align="center">47.29</td><td align="center">68.97</td></tr>
<tr><td align="left"><strong>Step-Audio 2</strong></td><td align="center"><strong>83.32</strong></td><td align="center"><strong>91.05</strong></td><td align="center"><strong>75.45</strong></td><td align="center"><strong>86.08</strong></td><td align="center">68.25</td><td align="center">74.78</td><td align="center"><strong>63.18</strong></td><td align="center">65.10</td></tr>
<tr><td align="left"><strong>Step-Audio 2 mini</strong></td><td align="center">77.81</td><td align="center">89.19</td><td align="center">64.53</td><td align="center">84.12</td><td align="center"><strong>69.57</strong></td><td align="center"><strong>76.84</strong></td><td align="center">58.90</td><td align="center">69.42</td></tr>
<tr><td align="left"><strong>GPT-4o Audio</strong></td><td rowspan="6" align="center"><strong>English</strong></td><td align="center"><strong>84.54</strong></td><td align="center">90.18</td><td align="center">75.90</td><td align="center"><strong>90.41</strong></td><td align="center"><strong>67.51</strong></td><td align="center">60.65</td><td align="center">64.36</td><td align="center"><strong>78.46</strong></td></tr>
<tr><td align="left"><strong>Kimi-Audio</strong></td><td align="center">60.04</td><td align="center">83.36</td><td align="center">42.31</td><td align="center">60.36</td><td align="center">49.79</td><td align="center">50.32</td><td align="center">40.59</td><td align="center">56.04</td></tr>
<tr><td align="left"><strong>Qwen-Omni</strong></td><td align="center">70.58</td><td align="center">66.29</td><td align="center">69.62</td><td align="center">76.16</td><td align="center">50.99</td><td align="center">44.51</td><td align="center">63.88</td><td align="center">49.41</td></tr>
<tr><td align="left"><strong>Step-Audio-AQAA</strong></td><td align="center">71.11</td><td align="center">90.15</td><td align="center">56.12</td><td align="center">72.06</td><td align="center">52.01</td><td align="center">44.25</td><td align="center">54.54</td><td align="center">59.81</td></tr>
<tr><td align="left"><strong>Step-Audio 2</strong></td><td align="center">83.90</td><td align="center"><strong>92.72</strong></td><td align="center"><strong>76.51</strong></td><td align="center">84.92</td><td align="center">66.07</td><td align="center"><strong>64.86</strong></td><td align="center"><strong>67.75</strong></td><td align="center">66.33</td></tr>
<tr><td align="left"><strong>Step-Audio 2 mini</strong></td><td align="center">74.36</td><td align="center">90.07</td><td align="center">60.12</td><td align="center">77.65</td><td align="center">61.25</td><td align="center">58.79</td><td align="center">61.94</td><td align="center">63.80</td></tr>
</tbody>
</table>

<!-- ## Online Engine
The online version of Step-Audio can be accessed from the app version of [跃问](https://yuewen.cn), where some impressive examples can be found as well.

<img src="./assets/yuewen.jpeg" width="200" alt="QR code"> -->

## License

The model and code in this repository are licensed under the [Apache 2.0](LICENSE) License.

## Citation

```
@misc{wu2025stepaudio2technicalreport,
      title={Step-Audio 2 Technical Report},
      author={Boyong Wu and Chao Yan and Chen Hu and Cheng Yi and Chengli Feng and Fei Tian and Feiyu Shen and Gang Yu and Haoyang Zhang and Jingbei Li and Mingrui Chen and Peng Liu and Wang You and Xiangyu Tony Zhang and Xingyuan Li and Xuerui Yang and Yayue Deng and Yechang Huang and Yuxin Li and Yuxin Zhang and Zhao You and Brian Li and Changyi Wan and Hanpeng Hu and Jiangjie Zhen and Siyu Chen and Song Yuan and Xuelin Zhang and Yimin Jiang and Yu Zhou and Yuxiang Yang and Bingxin Li and Buyun Ma and Changhe Song and Dongqing Pang and Guoqiang Hu and Haiyang Sun and Kang An and Na Wang and Shuli Gao and Wei Ji and Wen Li and Wen Sun and Xuan Wen and Yong Ren and Yuankai Ma and Yufan Lu and Bin Wang and Bo Li and Changxin Miao and Che Liu and Chen Xu and Dapeng Shi and Dingyuan Hu and Donghang Wu and Enle Liu and Guanzhe Huang and Gulin Yan and Han Zhang and Hao Nie and Haonan Jia and Hongyu Zhou and Jianjian Sun and Jiaoren Wu and Jie Wu and Jie Yang and Jin Yang and Junzhe Lin and Kaixiang Li and Lei Yang and Liying Shi and Li Zhou and Longlong Gu and Ming Li and Mingliang Li and Mingxiao Li and Nan Wu and Qi Han and Qinyuan Tan and Shaoliang Pang and Shengjie Fan and Siqi Liu and Tiancheng Cao and Wanying Lu and Wenqing He and Wuxun Xie and Xu Zhao and Xueqi Li and Yanbo Yu and Yang Yang and Yi Liu and Yifan Lu and Yilei Wang and Yuanhao Ding and Yuanwei Liang and Yuanwei Lu and Yuchu Luo and Yuhe Yin and Yumeng Zhan and Yuxiang Zhang and Zidong Yang and Zixin Zhang and Binxing Jiao and Daxin Jiang and Heung-Yeung Shum and Jiansheng Chen and Jing Li and Xiangyu Zhang and Yibo Zhu},
      year={2025},
      eprint={2507.16632},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2507.16632},
}
```
models/Step-Audio-2-mini/added_tokens.json
ADDED

The diff for this file is too large to render. See raw diff
models/Step-Audio-2-mini/assets/architecture5.png
ADDED
Git LFS Details

models/Step-Audio-2-mini/assets/arxiv.svg
ADDED

models/Step-Audio-2-mini/assets/logo.png
ADDED

models/Step-Audio-2-mini/assets/qrcode.jpg
ADDED

models/Step-Audio-2-mini/assets/radar.png
ADDED
Git LFS Details

models/Step-Audio-2-mini/assets/wechat_group.png
ADDED
models/Step-Audio-2-mini/config.json
ADDED

@@ -0,0 +1,38 @@
{
  "architectures": [
    "StepAudio2ForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "configuration_step_audio_2.StepAudio2Config",
    "AutoModelForCausalLM": "modeling_step_audio_2.StepAudio2ForCausalLM"
  },
  "model_type": "step_audio_2",
  "text_config": {
    "hidden_size": 3584,
    "intermediate_size": 18944,
    "num_attention_heads": 28,
    "num_attention_groups": 4,
    "num_key_value_heads": 4,
    "num_hidden_layers": 28,
    "max_seq_len": 16384,
    "vocab_size": 158720,
    "rms_norm_eps": 1e-06,
    "eos_token_id": 151643,
    "pad_token_id": 151643,
    "rope_theta": 1000000.0,
    "max_position_embeddings": 16384,
    "rope_scaling": null,
    "torch_dtype": "bfloat16"
  },
  "audio_encoder_config": {
    "n_mels": 128,
    "n_audio_ctx": 1500,
    "n_audio_state": 1280,
    "n_audio_head": 20,
    "n_audio_layer": 32,
    "n_codebook_size": 4096,
    "llm_dim": 3584,
    "kernel_size": 3,
    "adapter_stride": 2
  }
}
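
Thanks to the `auto_map` entries above, this config round-trips through the standard `AutoConfig` API; a quick sketch (illustrative, assuming a local clone of the repo):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("Step-Audio-2-mini", trust_remote_code=True)
print(cfg.model_type)                          # step_audio_2
print(cfg.text_config.hidden_size)             # 3584
print(cfg.audio_encoder_config.n_audio_state)  # 1280
```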
models/Step-Audio-2-mini/configuration_step_audio_2.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Union
|
| 2 |
+
|
| 3 |
+
from transformers import Qwen2Config
|
| 4 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class StepAudio2EncoderConfig(PretrainedConfig):
|
| 8 |
+
model_type = "step_audio_2_encoder"
|
| 9 |
+
|
| 10 |
+
def __init__(
|
| 11 |
+
self,
|
| 12 |
+
n_mels=128,
|
| 13 |
+
n_audio_ctx=1500,
|
| 14 |
+
n_audio_state=512,
|
| 15 |
+
n_audio_head=8,
|
| 16 |
+
n_audio_layer=6,
|
| 17 |
+
llm_dim=4096,
|
| 18 |
+
        kernel_size=3,
        adapter_stride=2,
        **kwargs,
    ):
        self.n_mels = n_mels
        self.n_audio_ctx = n_audio_ctx
        self.n_audio_state = n_audio_state
        self.n_audio_head = n_audio_head
        self.n_audio_layer = n_audio_layer
        self.llm_dim = llm_dim
        self.kernel_size = kernel_size
        self.adapter_stride = adapter_stride
        super().__init__(**kwargs)


class StepAudio2TextConfig(PretrainedConfig):
    model_type = "step_audio_2_text"

    def __init__(
        self,
        vocab_size=64012,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=48,
        num_attention_heads=32,
        num_attention_groups=4,
        num_key_value_heads=4,
        hidden_act="silu",
        max_position_embeddings=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        rope_theta=1000000.0,
        rope_scaling=None,
        eos_token_id=None,
        **kwargs
    ):
        if eos_token_id is not None:
            if isinstance(eos_token_id, list):
                eos_token_id = list(set([151643, 151645, 151665] + eos_token_id))
            else:
                eos_token_id = [151643, 151645, 151665, eos_token_id]
        else:
            eos_token_id = [151643, 151645, 151665]

        super().__init__(
            eos_token_id=eos_token_id,
            **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_attention_groups = num_attention_groups
        self.num_key_value_heads = num_key_value_heads
        assert self.num_attention_groups == self.num_key_value_heads, "num_attention_groups must be equal to num_key_value_heads"
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling

        self.text_config = Qwen2Config(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            rms_norm_eps=rms_norm_eps,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            architectures=["Qwen2ForCausalLM"],
            torch_dtype=getattr(self, "torch_dtype", "bfloat16"),
        )


class StepAudio2Config(PretrainedConfig):
    model_type = "step_audio_2"
    architectures = ["StepAudio2ForCausalLM"]

    def __init__(
        self,
        audio_encoder_config: Optional[Union[dict, StepAudio2EncoderConfig]] = None,
        text_config: Optional[Union[dict, StepAudio2TextConfig]] = None,
        use_sliding_window: bool = False,
        sliding_window: Optional[int] = 2048,
        max_window_layers: Optional[int] = None,
        **kwargs
    ):
        kwargs.setdefault("use_sliding_window", use_sliding_window)
        kwargs.setdefault("sliding_window", sliding_window)
        if max_window_layers is None:
            max_window_layers = kwargs.get("num_hidden_layers", None)
        kwargs.setdefault("max_window_layers", max_window_layers)
        super().__init__(**kwargs)

        if text_config is None:
            text_config = StepAudio2TextConfig().text_config
        elif isinstance(text_config, dict):
            text_config = StepAudio2TextConfig(**text_config).text_config

        self.text_config = text_config

        if audio_encoder_config is None:
            self.audio_encoder_config = StepAudio2EncoderConfig()
        elif isinstance(audio_encoder_config, dict):
            self.audio_encoder_config = StepAudio2EncoderConfig(**audio_encoder_config)
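A minimal usage sketch (not part of the commit; it assumes `transformers` is installed and the file above is importable as `configuration_step_audio_2`). It shows how the composite config resolves defaults and accepts plain dicts, which are routed through the corresponding config class:

    from configuration_step_audio_2 import StepAudio2Config

    # All defaults: text_config becomes the Qwen2Config built inside
    # StepAudio2TextConfig, and audio_encoder_config a default
    # StepAudio2EncoderConfig.
    config = StepAudio2Config()
    assert config.text_config.num_hidden_layers == 48

    # A dict is accepted and converted via StepAudio2TextConfig(**dict).
    small = StepAudio2Config(text_config={"num_hidden_layers": 24, "hidden_size": 2048})
    print(small.text_config.hidden_size)  # 2048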
models/Step-Audio-2-mini/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
models/Step-Audio-2-mini/model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b88e02b0b8c643412ec68cae009b3952dbd8e27642d61626065a2c420a8b73c
size 4925370984

models/Step-Audio-2-mini/model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d412c8d2fc17ca3351751f3171d48ff5b139af623aa05749062f132ac2585f1
size 4932751008

models/Step-Audio-2-mini/model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:135ae4a891350e8ebf9791ef073d310314e1f75192bece0971bfab7b86c5587c
size 4988307424

models/Step-Audio-2-mini/model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d35bf0ec42ff9ec160dfc6c5cb20a65247f0f8ba1c6edc620398c2ef49a66295
size 1784019520
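The four LFS pointers above describe a checkpoint sharded into roughly 16.6 GB of safetensors files; the index file that follows maps each tensor name to its shard. A minimal sketch (the local path is hypothetical, and it assumes the LFS objects have actually been pulled) that cross-checks the shards against the index:

    import json
    import os

    model_dir = "models/Step-Audio-2-mini"  # hypothetical local checkout

    with open(os.path.join(model_dir, "model.safetensors.index.json")) as f:
        index = json.load(f)

    # Each tensor name maps to the shard file that stores it.
    shards = sorted(set(index["weight_map"].values()))
    on_disk = sum(os.path.getsize(os.path.join(model_dir, s)) for s in shards)

    print(len(index["weight_map"]), "tensors across", len(shards), "shards")
    print("index total_size:", index["metadata"]["total_size"])
    print("bytes on disk:   ", on_disk)
    # on_disk slightly exceeds total_size because each shard also stores a
    # small safetensors header alongside the raw tensor bytes.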
models/Step-Audio-2-mini/model.safetensors.index.json
ADDED
@@ -0,0 +1,839 @@
{
  "metadata": {
    "total_size": 16630358528
  },
  "weight_map": {
    "adapter.conv.bias": "model-00004-of-00004.safetensors",
    "adapter.conv.weight": "model-00004-of-00004.safetensors",
    "adapter.linear1.bias": "model-00004-of-00004.safetensors",
    "adapter.linear1.weight": "model-00004-of-00004.safetensors",
    "adapter.linear2.bias": "model-00004-of-00004.safetensors",
    "adapter.linear2.weight": "model-00004-of-00004.safetensors",
    "encoder.after_norm.bias": "model-00004-of-00004.safetensors",
    "encoder.after_norm.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.0.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.0.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.1.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.10.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.11.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.12.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.13.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.14.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.15.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.16.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.16.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.16.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.16.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.16.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.16.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.17.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.18.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.19.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.2.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.2.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.20.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.20.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.21.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.22.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.23.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.24.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.25.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.26.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.27.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.28.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.29.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.3.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.3.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.30.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.30.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn.key.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn.out.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn.out.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn.query.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn.query.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn.value.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn.value.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.attn_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.mlp.0.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.mlp.0.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.mlp.2.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.mlp.2.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.mlp_ln.bias": "model-00004-of-00004.safetensors",
    "encoder.blocks.31.mlp_ln.weight": "model-00004-of-00004.safetensors",
    "encoder.blocks.4.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.4.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.5.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.6.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.7.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.8.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn.key.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn.out.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn.out.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn.query.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn.query.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn.value.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn.value.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.attn_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.mlp.0.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.mlp.0.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.mlp.2.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.mlp.2.weight": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.mlp_ln.bias": "model-00003-of-00004.safetensors",
    "encoder.blocks.9.mlp_ln.weight": "model-00003-of-00004.safetensors",
    "encoder.conv1.bias": "model-00003-of-00004.safetensors",
    "encoder.conv1.weight": "model-00003-of-00004.safetensors",
    "encoder.conv2.bias": "model-00003-of-00004.safetensors",
    "encoder.conv2.weight": "model-00003-of-00004.safetensors",
    "encoder.positional_embedding.weight": "model-00003-of-00004.safetensors",
    "lm_head.weight": "model-00004-of-00004.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 637 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 638 |
+
"model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 639 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 640 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 641 |
+
"model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 642 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 643 |
+
"model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 644 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 645 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 646 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 647 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 648 |
+
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 649 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 650 |
+
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 651 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 652 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 653 |
+
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 654 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 655 |
+
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 656 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 657 |
+
"model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 658 |
+
"model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 659 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 660 |
+
"model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 661 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 662 |
+
"model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 663 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 664 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 665 |
+
"model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 666 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 667 |
+
"model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 668 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 669 |
+
"model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 670 |
+
"model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 671 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 672 |
+
"model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 673 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 674 |
+
"model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 675 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 676 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 677 |
+
"model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 678 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 679 |
+
"model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 680 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 681 |
+
"model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 682 |
+
"model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 683 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 684 |
+
"model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 685 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 686 |
+
"model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 687 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 688 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 689 |
+
"model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 690 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 691 |
+
"model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 692 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 693 |
+
"model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 694 |
+
"model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 695 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 696 |
+
"model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 697 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 698 |
+
"model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 699 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 700 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 701 |
+
"model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 702 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 703 |
+
"model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 704 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 705 |
+
"model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 706 |
+
"model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 707 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 708 |
+
"model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 709 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 710 |
+
"model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 711 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 712 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 713 |
+
"model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 714 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 715 |
+
"model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 716 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 717 |
+
"model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 718 |
+
"model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 719 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 720 |
+
"model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 721 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 722 |
+
"model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 723 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 724 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 725 |
+
"model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 726 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 727 |
+
"model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 728 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 729 |
+
"model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 730 |
+
"model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 731 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 732 |
+
"model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 733 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 734 |
+
"model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 735 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 736 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 737 |
+
"model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 738 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 739 |
+
"model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 740 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 741 |
+
"model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 742 |
+
"model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 743 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 744 |
+
"model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 745 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 746 |
+
"model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 747 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 748 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 749 |
+
"model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 750 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 751 |
+
"model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 752 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 753 |
+
"model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 754 |
+
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 755 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 756 |
+
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 757 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 758 |
+
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 759 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 760 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 761 |
+
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 762 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 763 |
+
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 764 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 765 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 766 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 767 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 768 |
+
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 769 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 770 |
+
"model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 771 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 772 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 773 |
+
"model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 774 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 775 |
+
"model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 776 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 777 |
+
"model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 778 |
+
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 779 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 780 |
+
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 781 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 782 |
+
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 783 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 784 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 785 |
+
"model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 786 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 787 |
+
"model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 788 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 789 |
+
"model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 790 |
+
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 791 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 792 |
+
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 793 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 794 |
+
"model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 795 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 796 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 797 |
+
"model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 798 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 799 |
+
"model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 800 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 801 |
+
"model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 802 |
+
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 803 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 804 |
+
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 805 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 806 |
+
"model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 807 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 808 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 809 |
+
"model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 810 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 811 |
+
"model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 812 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 813 |
+
"model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 814 |
+
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 815 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 816 |
+
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 817 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 818 |
+
"model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 819 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 820 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 821 |
+
"model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 822 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 823 |
+
"model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 824 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 825 |
+
"model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 826 |
+
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 827 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 828 |
+
"model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 829 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 830 |
+
"model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 831 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 832 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 833 |
+
"model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 834 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 835 |
+
"model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 836 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 837 |
+
"model.norm.weight": "model-00003-of-00004.safetensors"
|
| 838 |
+
}
|
| 839 |
+
}
|
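For reference, this weight_map is how loaders find which shard holds a given tensor without opening all four files. A minimal sketch, assuming the shards and the index sit under a local models/Step-Audio-2-mini/ directory:

# Minimal sketch: look up one tensor's shard in the index and load just that
# tensor. The local path is an assumption; any entry from the weight_map works.
import json
from safetensors import safe_open

model_dir = "models/Step-Audio-2-mini"
with open(f"{model_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.norm.weight"
shard = index["weight_map"][name]  # -> "model-00003-of-00004.safetensors"
with safe_open(f"{model_dir}/{shard}", framework="pt") as st:
    tensor = st.get_tensor(name)
print(name, tuple(tensor.shape))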
models/Step-Audio-2-mini/modeling_step_audio_2.py
ADDED
@@ -0,0 +1,425 @@
from typing import Iterable, Optional, Tuple

import librosa
import torch
import torch.nn.functional as F
import torchaudio
from torch import Tensor, nn
from transformers import PreTrainedModel, Qwen2Model
from transformers.generation.utils import GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast

from .configuration_step_audio_2 import StepAudio2Config


def _mel_filters(n_mels: int) -> torch.Tensor:
    """Load the mel filterbank matrix for projecting STFT into a Mel spectrogram."""
    assert n_mels in {80, 128}, f"Unsupported n_mels: {n_mels}"
    if n_mels == 128:
        return torch.from_numpy(librosa.filters.mel(sr=16000, n_fft=400, n_mels=128))
    else:
        return torch.from_numpy(librosa.filters.mel(sr=16000, n_fft=400, n_mels=80))


def load_audio(file_path, target_rate=16000, max_length=None):
    """
    Open an audio file and read it as a mono waveform, resampling as necessary.
    If max_length is provided, truncate the audio to that length.
    """
    waveform, sample_rate = torchaudio.load(file_path)
    if sample_rate != target_rate:
        waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_rate)(waveform)
    audio = waveform[0]  # get the first channel

    # Truncate audio if it exceeds max_length
    if max_length is not None and audio.shape[0] > max_length:
        audio = audio[:max_length]

    return audio


def log_mel_spectrogram(audio, n_mels=128, padding=479, device=None):
    """
    Compute the log-Mel spectrogram with specific padding for StepAudio.
    """
    if not torch.is_tensor(audio):
        if isinstance(audio, str):
            audio = load_audio(audio)  # already returns a tensor
        else:
            audio = torch.from_numpy(audio)
    if device is not None:
        audio = audio.to(device)
    if padding > 0:
        audio = F.pad(audio, (0, padding))
    window = torch.hann_window(400).to(audio.device)
    stft = torch.stft(audio, 400, 160, window=window, return_complex=True)
    magnitudes = stft[..., :-1].abs() ** 2
    filters = _mel_filters(n_mels)
    mel_spec = filters @ magnitudes

    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec


def compute_token_num(max_feature_len):
    # First, audio goes through the encoder:
    # 1. conv1: kernel=3, stride=1, padding=1 -> size unchanged
    # 2. conv2: kernel=3, stride=2, padding=1 -> size/2
    # 3. avg_pooler: kernel=2, stride=2 -> size/2
    max_feature_len = max_feature_len - 2  # remove padding
    encoder_output_dim = (max_feature_len + 1) // 2 // 2  # after conv2 and avg_pooler

    # Then through the adaptor (parameters from the config file):
    padding = 1
    kernel_size = 3  # from config: audio_encoder_config.kernel_size
    stride = 2  # from config: audio_encoder_config.adapter_stride
    adapter_output_dim = (encoder_output_dim + 2 * padding - kernel_size) // stride + 1
    return adapter_output_dim


def make_non_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """Make a mask tensor marking the non-padded part of each sequence.

    The sequences in a batch may have different lengths. To enable
    batched computation, padding is needed to make all sequences the
    same size. So that the padding does not pass values to
    context-dependent blocks such as attention or convolution, the
    padded part is masked.

    1 for the non-padded part and 0 for the padded part.

    Parameters
    ----------
    lengths (torch.Tensor): Batch of lengths (B,).

    Returns
    -------
    torch.Tensor: Mask tensor marking the non-padded part (B, max_T).

    Examples:
        >>> import torch
        >>> import s3tokenizer
        >>> lengths = torch.tensor([5, 3, 2])
        >>> masks = s3tokenizer.make_non_pad_mask(lengths)
        masks = [[1, 1, 1, 1, 1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
    """
    batch_size = lengths.size(0)
    max_len = max_len if max_len > 0 else lengths.max().item()
    seq_range = torch.arange(0,
                             max_len,
                             dtype=torch.int64,
                             device=lengths.device)
    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
    seq_length_expand = lengths.unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand
    return ~mask


def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    """Convert a bool mask tensor to a float bias tensor for flash attention.

    Parameters
    ----------
    mask (torch.Tensor): Boolean mask tensor (B, ?).

    Returns
    -------
    torch.Tensor: Bias tensor with large negative values at the padded part (B, ?).

    Examples:
        >>> import torch
        >>> import s3tokenizer
        >>> lengths = torch.tensor([5, 3, 2])
        >>> masks = s3tokenizer.make_non_pad_mask(lengths)
        masks = [[1, 1, 1, 1, 1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
        >>> new_masks = s3tokenizer.mask_to_bias(masks, torch.float32)
        new_masks = [[-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00],
                     [-0.0000e+00, -0.0000e+00, -0.0000e+00, -1.0000e+10, -1.0000e+10],
                     [-0.0000e+00, -0.0000e+00, -1.0000e+10, -1.0000e+10, -1.0000e+10]]
    """
    assert mask.dtype == torch.bool
    assert dtype in [torch.float32, torch.bfloat16, torch.float16]
    mask = mask.to(dtype)
    # attention mask bias
    # NOTE(Mddct): torch.finfo jit issues
    # chunk_masks = (1.0 - chunk_masks) * torch.finfo(dtype).min
    mask = (1.0 - mask) * -1.0e+10
    return mask


class LayerNorm(nn.LayerNorm):
    def forward(self, input: Tensor) -> Tensor:
        return super().forward(input).type(input.dtype)


class Linear(nn.Linear):
    def forward(self, input: Tensor) -> Tensor:
        return F.linear(
            input,
            self.weight.to(input.dtype),
            None if self.bias is None else self.bias.to(input.dtype),
        )


class Conv1d(nn.Conv1d):
    def _conv_forward(
        self, input: Tensor, weight: Tensor, bias: Optional[Tensor]
    ) -> Tensor:
        return super()._conv_forward(
            input, weight.to(input.dtype), None if bias is None else bias.to(input.dtype)
        )


class MultiHeadAttention(nn.Module):
    def __init__(self, n_state: int, n_head: int):
        super().__init__()
        self.n_head = n_head
        self.query = Linear(n_state, n_state)
        self.key = Linear(n_state, n_state, bias=False)
        self.value = Linear(n_state, n_state)
        self.out = Linear(n_state, n_state)

    def forward(
        self,
        x: Tensor,
        mask: Optional[Tensor] = None,
    ):
        q = self.query(x)
        k = self.key(x)
        v = self.value(x)

        wv, qk = self.qkv_attention(q, k, v, mask)
        return self.out(wv), qk

    def qkv_attention(
        self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None
    ):
        _, T, D = q.shape
        scale = (D // self.n_head) ** -0.25
        q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale
        k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale
        v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)

        qk = q @ k  # (B, n_head, T, T)
        if mask is not None:
            qk = qk + mask
        qk = qk.float()

        w = F.softmax(qk, dim=-1).to(q.dtype)
        return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2), qk.detach()


class ResidualAttentionBlock(nn.Module):
    def __init__(self, n_state: int, n_head: int):
        super().__init__()

        self.attn = MultiHeadAttention(n_state, n_head)
        self.attn_ln = LayerNorm(n_state)

        n_mlp = n_state * 4
        self.mlp = nn.Sequential(
            Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state)
        )
        self.mlp_ln = LayerNorm(n_state)

    def forward(
        self,
        x: Tensor,
        mask: Optional[Tensor] = None,
    ):
        x = x + self.attn(self.attn_ln(x.contiguous()), mask=mask)[0]
        x = x + self.mlp(self.mlp_ln(x.contiguous()))
        return x


class AudioEncoder(nn.Module):
    def __init__(
        self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int
    ):
        super().__init__()
        self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1)
        self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
        self.positional_embedding = nn.Embedding(n_ctx, n_state)
        self.positional_embedding.requires_grad_(False)
        self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
            [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
        )
        self.avg_pooler = nn.AvgPool1d(2, stride=2)
        self.after_norm = LayerNorm(n_state)
        self.gradient_checkpointing = False

    def forward(self, x: Tensor, x_len: Tensor) -> Tuple[Tensor, Tensor]:
        T = x.size(-1)
        x = F.gelu(self.conv1(x))
        x = F.gelu(self.conv2(x))
        x = x.permute(0, 2, 1)  # (B, T // 2, n_state)
        mask = make_non_pad_mask(x_len, T).unsqueeze(1)  # (B, 1, T)
        mask = mask_to_bias(mask[:, :, (T + 1) % 2::2], x.dtype)  # (B, 1, T // 2)
        x = (x + self.positional_embedding.weight[:x.shape[1], :]).to(x.dtype)
        for block in self.blocks:
            if self.gradient_checkpointing and self.training:
                x = torch.utils.checkpoint.checkpoint(block, x, mask.unsqueeze(1))
            else:
                x = block(x, mask.unsqueeze(1))
        x = x.permute(0, 2, 1)
        x = self.avg_pooler(x)
        x = x.permute(0, 2, 1)
        x_len = (x_len + 1) // 2 // 2
        x = self.after_norm(x.contiguous())
        return x, x_len


class Adaptor(nn.Module):
    def __init__(
        self,
        n_state: int = 1280,
        n_hidden: int = 3072,
        kernel_size: int = 7,
        stride: int = 4
    ):
        super().__init__()
        self.stride = stride
        if self.stride != -1:
            self.conv = Conv1d(n_state, n_state, kernel_size, stride, padding=1)
        self.linear1 = nn.Linear(n_state, 2048)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(2048, n_hidden)
        self.gradient_checkpointing = False

    def forward(self, x: Tensor) -> Tensor:
        if self.stride != -1:
            if self.gradient_checkpointing and self.training:
                x = torch.utils.checkpoint.checkpoint(self.conv, x.permute(0, 2, 1))
                x = x.permute(0, 2, 1)
            else:
                x = x.permute(0, 2, 1)
                x = F.gelu(self.conv(x))
                x = x.permute(0, 2, 1)
        if self.gradient_checkpointing and self.training:
            x = torch.utils.checkpoint.checkpoint(self.linear1, x)
            x = torch.utils.checkpoint.checkpoint(self.relu, x)
            x = torch.utils.checkpoint.checkpoint(self.linear2, x)
        else:
            x = self.linear1(x)
            x = self.relu(x)
            x = self.linear2(x)
        return x


class StepAudio2ForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = StepAudio2Config
    main_input_name = "input_ids"
    # Important: add this attribute so HF recognizes the model as generation-capable
    # _keys_to_ignore_on_load_missing = ["lm_head.weight"]
    supports_gradient_checkpointing = True  # declare support for gradient checkpointing

    def __init__(self, config: StepAudio2Config):
        super().__init__(config)
        if isinstance(config.torch_dtype, str):
            dtype = getattr(torch, config.torch_dtype)
        else:
            dtype = config.torch_dtype
        self.model = Qwen2Model(config.text_config)
        self.bf16 = dtype == torch.bfloat16
        self.encoder = AudioEncoder(
            config.audio_encoder_config.n_mels, config.audio_encoder_config.n_audio_ctx, config.audio_encoder_config.n_audio_state,
            config.audio_encoder_config.n_audio_head, config.audio_encoder_config.n_audio_layer
        )
        self.adapter = Adaptor(
            config.audio_encoder_config.n_audio_state, config.audio_encoder_config.llm_dim,
            config.audio_encoder_config.kernel_size, config.audio_encoder_config.adapter_stride
        )
        if self.bf16:
            self.encoder = self.encoder.bfloat16()
            self.adapter = self.adapter.bfloat16()
        self.lm_head = torch.nn.Linear(
            config.text_config.hidden_size,
            config.text_config.vocab_size,
            bias=False,
            dtype=dtype
        )
        self.post_init()

    def forward(
        self,
        input_ids=None,
        wavs=None,
        wav_lens=None,
        attention_mask=None,
        **kwargs
    ):
        hidden_states = self.model.embed_tokens(input_ids)
        if wavs is not None:
            if self.bf16:
                wavs = wavs.bfloat16()
            out, feat_lens = self.encoder(wavs, wav_lens)
            out = self.adapter(out)
            feat_lens = (feat_lens - 1) // 2 + 1
            # Write the audio features into the positions following each
            # audio placeholder token (id 151688)
            insert_location = torch.nonzero(input_ids == 151688)
            insert_location[:, 1] += 1
            for idx in range(len(insert_location)):
                i, s = insert_location[idx]
                hidden_states[i][s : s + feat_lens[idx]] = out[idx][:feat_lens[idx]]

        x = self.model(inputs_embeds=hidden_states, attention_mask=attention_mask)[0]
        logits = self.lm_head(x)
        return CausalLMOutputWithPast(
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None
        )

    def get_input_embeddings(self):
        """Return the model's input embeddings - required for GenerationMixin"""
        return self.model.embed_tokens

    def get_output_embeddings(self):
        """Return the model's output embeddings (LM head) - required for GenerationMixin"""
        return self.lm_head

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
        """Prepare inputs for generation - required for GenerationMixin"""
        # Keep the wavs and wav_lens from the initial call
        wavs = kwargs.get("wavs", None)
        wav_lens = kwargs.get("wav_lens", None)

        # For generation steps after the first, we don't need to process audio again
        # because the audio tokens have already been replaced in the input sequence
        if "past_key_values" in kwargs and kwargs["past_key_values"] is not None:
            # We're in a generation step, no need to process audio again
            return {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "past_key_values": kwargs.get("past_key_values")
            }

        # First generation step, include audio processing
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "wavs": wavs,
            "wav_lens": wav_lens
        }

    def _reorder_cache(self, past_key_values, beam_idx):
        """Reorder the cache for beam search - required for GenerationMixin if using beam search"""
        # If you're not using past_key_values or beam search, this can be a simple pass-through
        # Otherwise implement according to your model's cache structure
        return past_key_values

    def _set_gradient_checkpointing(self, module, value=False):
        # For Qwen2Model
        if hasattr(self.model, 'gradient_checkpointing'):
            self.model.gradient_checkpointing = value

        # Add the missing _gradient_checkpointing_func method to Qwen2Model
        # This is what Qwen2Model tries to use when gradient_checkpointing=True
        if value and not hasattr(self.model, '_gradient_checkpointing_func'):
            def _gradient_checkpointing_func(module_to_run, *args, **kwargs):
                # This function wraps torch.utils.checkpoint.checkpoint
                # and is used by Qwen2Model to perform checkpointing
                return torch.utils.checkpoint.checkpoint(module_to_run, *args, **kwargs)

            self.model._gradient_checkpointing_func = _gradient_checkpointing_func

        # For custom encoder and adapter
        if hasattr(self.encoder, 'gradient_checkpointing'):
            self.encoder.gradient_checkpointing = value
        if hasattr(self.adapter, 'gradient_checkpointing'):
            self.adapter.gradient_checkpointing = value
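For orientation, here is a minimal smoke-test sketch of how the class above wires audio into the text stream. It is not the official inference pipeline: the local paths, the dummy prompt, and importing the helpers straight from the modeling file are all assumptions; a real prompt comes from the chat template in the Step-Audio-2 repository.

# Minimal sketch under the assumptions stated above.
import torch
from modeling_step_audio_2 import (
    StepAudio2ForCausalLM, log_mel_spectrogram, compute_token_num,
)

model = StepAudio2ForCausalLM.from_pretrained("models/Step-Audio-2-mini")
model.eval()

mel = log_mel_spectrogram("prompt.wav")      # (128, T); file name assumed
n_audio = compute_token_num(mel.shape[-1])   # frames left after encoder + adaptor
wavs = mel.unsqueeze(0)                      # (1, 128, T)
wav_lens = torch.tensor([mel.shape[-1]])

# forward() looks for token id 151688 and overwrites the positions after it
# with the audio features, so the zeros below are placeholders that get
# replaced; two extra slots are reserved as slack for rounding in the
# length computation.
input_ids = torch.cat([
    torch.tensor([[151688]]),
    torch.zeros(1, n_audio + 2, dtype=torch.long),
], dim=1)

with torch.no_grad():
    out = model(input_ids=input_ids, wavs=wavs, wav_lens=wav_lens)
print(out.logits.shape)  # (1, n_audio + 3, vocab_size)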
models/Step-Audio-2-mini/source.txt
ADDED
@@ -0,0 +1 @@
https://huggingface.co/stepfun-ai/Step-Audio-2-mini
models/Step-Audio-2-mini/special_tokens_map.json
ADDED
@@ -0,0 +1,43 @@
{
  "additional_special_tokens": [
    "<|EOT|>",
    "<|BOT|>",
    "<|CALL_START|>",
    "<|CALL_END|>",
    "<|THINK_START|>",
    "<|THINK_END|>",
    "<|IMG_START|>",
    "<|IMG_END|>",
    "<|META_START|>",
    "<|META_END|>",
    "<im_patch>",
    "<im_start>",
    "<im_end>",
    "<dream>",
    "<dream_start>",
    "<dream_end>",
    "<|MASK_1e69f|>",
    "<|UNMASK_1e69f|>",
    "<video_start>",
    "<video_end>",
    "<patch_start>",
    "<patch_end>",
    "<patch_newline>",
    "<audio_start>",
    "<audio_end>",
    "<audio_patch>",
    "<audio_patch_pad>",
    "<|SC|>",
    "<tts_start>",
    "<tts_end>",
    "<tts_pad>"
  ],
  "eos_token": "<|EOT|>",
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
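A quick way to check which id the audio placeholder carries in this vocabulary: the modeling file above hard-codes 151688, and that this id belongs to <audio_patch> is an assumption to verify here, not a documented fact.

# Sketch only; assumes the tokenizer files above sit in a local directory.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("models/Step-Audio-2-mini", trust_remote_code=True)
print(tok.convert_tokens_to_ids("<audio_patch>"))  # expect 151688 if the assumption holds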
models/Step-Audio-2-mini/token2wav/campplus.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a6ac6a63997761ae2997373e2ee1c47040854b4b759ea41ec48e4e42df0f4d73
size 28303423
models/Step-Audio-2-mini/token2wav/flow.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:15ccff24256ff61537c7f8b51e025116b83405f3fb017b54b008fc97da115446
size 623466603
models/Step-Audio-2-mini/token2wav/flow.yaml
ADDED
@@ -0,0 +1,34 @@
flow: !new:cosyvoice2.flow.flow.CausalMaskedDiffWithXvec
    input_size: 512
    output_size: 80
    spk_embed_dim: 192
    output_type: 'mel'
    vocab_size: 6561
    encoder: !new:cosyvoice2.transformer.upsample_encoder_v2.UpsampleConformerEncoderV2
        input_size: 512
        output_size: 512
        input_layer: 'linear'
        pre_lookahead_len: 3
        num_blocks: 6
        num_up_blocks: 4
        up_stride: 2
        up_scale_factor: 2
        attention_heads: 8
        pos_enc_layer_type: 'rel_pos_espnet'
        selfattention_layer_type: 'rel_selfattn'
        key_bias: true
        linear_units: 2048
        dropout_rate: 0.1
        positional_dropout_rate: 0.1
        attention_dropout_rate: 0.1
        normalize_before: True
    decoder: !new:cosyvoice2.flow.flow_matching.CausalConditionalCFM
        inference_cfg_rate: 0.7
        estimator: !new:cosyvoice2.flow.decoder_dit.DiT
            in_channels: 320
            out_channels: 80
            mlp_ratio: 4.0
            depth: 16
            num_heads: 8
            head_dim: 64
            hidden_size: 512
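The !new: tags above are HyperPyYAML constructors. A sketch of how such a config is typically instantiated, assuming the cosyvoice2 package referenced by the tags is importable and that flow.pt holds a matching state dict:

# Sketch only, under the assumptions stated above.
import torch
from hyperpyyaml import load_hyperpyyaml

with open("models/Step-Audio-2-mini/token2wav/flow.yaml") as f:
    cfg = load_hyperpyyaml(f)

flow = cfg["flow"]  # the CausalMaskedDiffWithXvec module built from the config
flow.load_state_dict(torch.load("models/Step-Audio-2-mini/token2wav/flow.pt", map_location="cpu"))
flow.eval()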
models/Step-Audio-2-mini/token2wav/hift.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3386cc880324d4e98e05987b99107f49e40ed925b8ecc87c1f4939432d429879
size 83390254
models/Step-Audio-2-mini/token2wav/speech_tokenizer_v2_25hz.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d43342aa12163a80bf07bffb94c9de2e120a8df2f9917cd2f642e7f4219c6f71
size 496082973
models/Step-Audio-2-mini/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:529f599b059f0e73c30a25205b906a84add3988c964de0b7db090f34d71e2e6e
size 12684784
models/Step-Audio-2-mini/tokenizer_config.json
ADDED
The diff for this file is too large to render. See raw diff.
models/Step-Audio-2-mini/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.