Automatic Speech Recognition · Transformers · Safetensors · meralion2 · meralion · meralion-2 · custom_code
YingxuHe committed (verified)
Commit 826a474 · 1 parent 7523163

Upload MERaLiONForConditionalGeneration

config.json CHANGED
@@ -1,7 +1,10 @@
 {
-  "_attn_implementation_autoset": true,
+  "architectures": [
+    "MERaLiONForConditionalGeneration"
+  ],
   "auto_map": {
-    "AutoConfig": "configuration_meralion.MERaLiONConfig"
+    "AutoConfig": "configuration_meralion.MERaLiONConfig",
+    "AutoModelForSpeechSeq2Seq": "modeling_meralion.MERaLiONForConditionalGeneration"
   },
   "head_dim": 256,
   "hidden_size": 3584,
@@ -92,5 +95,6 @@
     "use_cache": true,
     "vocab_size": 256000
   },
+  "torch_dtype": "bfloat16",
   "transformers_version": "4.50.1"
 }
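The `auto_map` entries above register the custom `MERaLiONConfig` and `MERaLiONForConditionalGeneration` classes shipped inside this repository (hence the `custom_code` tag), so the model loads through `trust_remote_code`. A minimal loading sketch; the repo id is a placeholder, not part of this commit:

# Minimal loading sketch. "org/meralion-2" is a hypothetical repo id --
# substitute the actual Hub path of this model.
import torch
from transformers import AutoModelForSpeechSeq2Seq

repo_id = "org/meralion-2"  # hypothetical

# trust_remote_code=True is needed because auto_map points at the custom
# configuration_meralion / modeling_meralion modules in the repository.
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    repo_id,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # matches the "torch_dtype" this commit adds
)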
generation_config.json ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "no_repeat_ngram_size": 8,
+  "pad_token_id": 0,
+  "transformers_version": "4.50.1"
+}
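These defaults are picked up automatically by `model.generate()`: `no_repeat_ngram_size: 8` blocks any 8-gram from repeating during decoding, and `cache_implementation: "hybrid"` selects the hybrid KV cache used by Gemma-2-style decoders that mix sliding-window and global attention layers. A short sketch of inspecting and overriding them (repo id again a placeholder):

# Sketch: inspect the defaults generate() will use, or override them per call.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("org/meralion-2")  # hypothetical
print(gen_config.no_repeat_ngram_size)  # 8: no 8-gram may repeat in the output
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)

# Per-call arguments win over the file's defaults:
# outputs = model.generate(**inputs, max_new_tokens=256, no_repeat_ngram_size=0)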
model-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3d2166f3c168ce1c446fb8473f40840594aac988f846506557f798607c71054
+size 4944547928
model-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:324708c9a3f19490c146ac8ab9f470b7dc68745edb864982debc480aad3b007f
+size 4991612584
model-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e68949c5c760558b98f0728b9bf81653b2c95342b746a0c6b788fded33b970c
+size 4918183272
model-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:396a57bb9974d9aa04793bdbb69209bb719b36382cb98a91d6a1d331d39f5a8b
+size 4991612648
model-00005-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a1e95e5975aea36eab0706275c54bdb3265720a889b371296d6b5584cf5e2a8
+size 367038856
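Each shard is stored through Git LFS, so the repository itself only versions a small pointer file (spec version, sha256 oid, byte size); the payload is fetched on download. The five shard sizes sum to roughly 20.2 GB, in line with the index's total_size of 20,212,875,776 bytes of tensor data, i.e. about 10.1B parameters at 2 bytes each in bfloat16. A sketch of verifying a downloaded shard against its pointer, and of reading one tensor through the shard index added below:

# Sketch: verify a downloaded shard against its LFS pointer, then use
# model.safetensors.index.json to read one tensor without loading the whole
# model. Paths assume the files sit in the current directory.
import hashlib
import json
from safetensors import safe_open

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# oid and file name come from the pointer for shard 1 above
assert sha256_of("model-00001-of-00005.safetensors") == \
    "f3d2166f3c168ce1c446fb8473f40840594aac988f846506557f798607c71054"

# weight_map tells from_pretrained (and us) which shard holds which tensor
with open("model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

shard = weight_map["ln_speech.weight"]  # -> "model-00001-of-00005.safetensors"
with safe_open(shard, framework="pt") as st:
    tensor = st.get_tensor("ln_speech.weight")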
model.safetensors.index.json ADDED
@@ -0,0 +1,968 @@
+{
+  "metadata": {
+    "total_size": 20212875776
+  },
+  "weight_map": {
+    "ln_speech.bias": "model-00001-of-00005.safetensors",
+    "ln_speech.weight": "model-00001-of-00005.safetensors",
+    "speech_audio_adapter.gate_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_audio_adapter.gate_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_audio_adapter.mlp_adapter.0.bias": "model-00001-of-00005.safetensors",
+    "speech_audio_adapter.mlp_adapter.0.weight": "model-00001-of-00005.safetensors",
+    "speech_audio_adapter.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_audio_adapter.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_audio_adapter.pool_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_audio_adapter.pool_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.conv1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.conv1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.conv2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.conv2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.embed_positions.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.0.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.1.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.10.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.11.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.12.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.13.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.14.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.15.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.16.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.17.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.18.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.19.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.2.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.20.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.21.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.22.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.23.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.24.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.25.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.26.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.27.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.28.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.29.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.3.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.30.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.31.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.4.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.5.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.6.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.7.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.8.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.fc1.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.fc1.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.fc2.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.fc2.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+    "speech_encoder.layers.9.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.embed_tokens.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.input_layernorm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.post_feedforward_layernorm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.pre_feedforward_layernorm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.input_layernorm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.post_feedforward_layernorm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.pre_feedforward_layernorm.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+    "text_decoder.model.layers.10.input_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.input_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.input_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.input_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.input_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.input_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.16.input_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+    "text_decoder.model.layers.16.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.input_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.17.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.18.input_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.18.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.18.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.18.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
+    "text_decoder.model.layers.18.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
621
+ "text_decoder.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
622
+ "text_decoder.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
623
+ "text_decoder.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
624
+ "text_decoder.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
625
+ "text_decoder.model.layers.19.input_layernorm.weight": "model-00003-of-00005.safetensors",
626
+ "text_decoder.model.layers.19.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
627
+ "text_decoder.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
628
+ "text_decoder.model.layers.19.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
629
+ "text_decoder.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
630
+ "text_decoder.model.layers.19.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
631
+ "text_decoder.model.layers.19.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
632
+ "text_decoder.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
633
+ "text_decoder.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
634
+ "text_decoder.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
635
+ "text_decoder.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
636
+ "text_decoder.model.layers.2.input_layernorm.weight": "model-00001-of-00005.safetensors",
637
+ "text_decoder.model.layers.2.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
638
+ "text_decoder.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
639
+ "text_decoder.model.layers.2.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
640
+ "text_decoder.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
641
+ "text_decoder.model.layers.2.post_feedforward_layernorm.weight": "model-00001-of-00005.safetensors",
642
+ "text_decoder.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00005.safetensors",
643
+ "text_decoder.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
644
+ "text_decoder.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
645
+ "text_decoder.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
646
+ "text_decoder.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
647
+ "text_decoder.model.layers.20.input_layernorm.weight": "model-00003-of-00005.safetensors",
648
+ "text_decoder.model.layers.20.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
649
+ "text_decoder.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
650
+ "text_decoder.model.layers.20.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
651
+ "text_decoder.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
652
+ "text_decoder.model.layers.20.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
653
+ "text_decoder.model.layers.20.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
654
+ "text_decoder.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
655
+ "text_decoder.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
656
+ "text_decoder.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
657
+ "text_decoder.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
658
+ "text_decoder.model.layers.21.input_layernorm.weight": "model-00003-of-00005.safetensors",
659
+ "text_decoder.model.layers.21.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
660
+ "text_decoder.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
661
+ "text_decoder.model.layers.21.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
662
+ "text_decoder.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
663
+ "text_decoder.model.layers.21.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
664
+ "text_decoder.model.layers.21.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
665
+ "text_decoder.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
666
+ "text_decoder.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
667
+ "text_decoder.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
668
+ "text_decoder.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
669
+ "text_decoder.model.layers.22.input_layernorm.weight": "model-00003-of-00005.safetensors",
670
+ "text_decoder.model.layers.22.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
671
+ "text_decoder.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
672
+ "text_decoder.model.layers.22.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
673
+ "text_decoder.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
674
+ "text_decoder.model.layers.22.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
675
+ "text_decoder.model.layers.22.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
676
+ "text_decoder.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
677
+ "text_decoder.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
678
+ "text_decoder.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
679
+ "text_decoder.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
680
+ "text_decoder.model.layers.23.input_layernorm.weight": "model-00003-of-00005.safetensors",
681
+ "text_decoder.model.layers.23.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
682
+ "text_decoder.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
683
+ "text_decoder.model.layers.23.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
684
+ "text_decoder.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
685
+ "text_decoder.model.layers.23.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
686
+ "text_decoder.model.layers.23.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
687
+ "text_decoder.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
688
+ "text_decoder.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
689
+ "text_decoder.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
690
+ "text_decoder.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
691
+ "text_decoder.model.layers.24.input_layernorm.weight": "model-00003-of-00005.safetensors",
692
+ "text_decoder.model.layers.24.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
693
+ "text_decoder.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
694
+ "text_decoder.model.layers.24.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
695
+ "text_decoder.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
696
+ "text_decoder.model.layers.24.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
697
+ "text_decoder.model.layers.24.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
698
+ "text_decoder.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
699
+ "text_decoder.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
700
+ "text_decoder.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
701
+ "text_decoder.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
702
+ "text_decoder.model.layers.25.input_layernorm.weight": "model-00003-of-00005.safetensors",
703
+ "text_decoder.model.layers.25.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
704
+ "text_decoder.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
705
+ "text_decoder.model.layers.25.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
706
+ "text_decoder.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
707
+ "text_decoder.model.layers.25.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
708
+ "text_decoder.model.layers.25.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
709
+ "text_decoder.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
710
+ "text_decoder.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
711
+ "text_decoder.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
712
+ "text_decoder.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
713
+ "text_decoder.model.layers.26.input_layernorm.weight": "model-00003-of-00005.safetensors",
714
+ "text_decoder.model.layers.26.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
715
+ "text_decoder.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
716
+ "text_decoder.model.layers.26.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
717
+ "text_decoder.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
718
+ "text_decoder.model.layers.26.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
719
+ "text_decoder.model.layers.26.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
720
+ "text_decoder.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
721
+ "text_decoder.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
722
+ "text_decoder.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
723
+ "text_decoder.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
724
+ "text_decoder.model.layers.27.input_layernorm.weight": "model-00003-of-00005.safetensors",
725
+ "text_decoder.model.layers.27.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
726
+ "text_decoder.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
727
+ "text_decoder.model.layers.27.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
728
+ "text_decoder.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
729
+ "text_decoder.model.layers.27.post_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
730
+ "text_decoder.model.layers.27.pre_feedforward_layernorm.weight": "model-00003-of-00005.safetensors",
731
+ "text_decoder.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
732
+ "text_decoder.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
733
+ "text_decoder.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
734
+ "text_decoder.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
735
+ "text_decoder.model.layers.28.input_layernorm.weight": "model-00004-of-00005.safetensors",
736
+ "text_decoder.model.layers.28.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
737
+ "text_decoder.model.layers.28.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
738
+ "text_decoder.model.layers.28.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
739
+ "text_decoder.model.layers.28.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
740
+ "text_decoder.model.layers.28.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
741
+ "text_decoder.model.layers.28.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
742
+ "text_decoder.model.layers.28.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
743
+ "text_decoder.model.layers.28.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
744
+ "text_decoder.model.layers.28.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
745
+ "text_decoder.model.layers.28.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
746
+ "text_decoder.model.layers.29.input_layernorm.weight": "model-00004-of-00005.safetensors",
747
+ "text_decoder.model.layers.29.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
748
+ "text_decoder.model.layers.29.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
749
+ "text_decoder.model.layers.29.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
750
+ "text_decoder.model.layers.29.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
751
+ "text_decoder.model.layers.29.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
752
+ "text_decoder.model.layers.29.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
753
+ "text_decoder.model.layers.29.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
754
+ "text_decoder.model.layers.29.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
755
+ "text_decoder.model.layers.29.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
756
+ "text_decoder.model.layers.29.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
757
+ "text_decoder.model.layers.3.input_layernorm.weight": "model-00002-of-00005.safetensors",
758
+ "text_decoder.model.layers.3.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
759
+ "text_decoder.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
760
+ "text_decoder.model.layers.3.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
761
+ "text_decoder.model.layers.3.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
762
+ "text_decoder.model.layers.3.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
763
+ "text_decoder.model.layers.3.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
764
+ "text_decoder.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
765
+ "text_decoder.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
766
+ "text_decoder.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
767
+ "text_decoder.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
768
+ "text_decoder.model.layers.30.input_layernorm.weight": "model-00004-of-00005.safetensors",
769
+ "text_decoder.model.layers.30.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
770
+ "text_decoder.model.layers.30.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
771
+ "text_decoder.model.layers.30.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
772
+ "text_decoder.model.layers.30.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
773
+ "text_decoder.model.layers.30.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
774
+ "text_decoder.model.layers.30.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
775
+ "text_decoder.model.layers.30.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
776
+ "text_decoder.model.layers.30.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
777
+ "text_decoder.model.layers.30.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
778
+ "text_decoder.model.layers.30.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
779
+ "text_decoder.model.layers.31.input_layernorm.weight": "model-00004-of-00005.safetensors",
780
+ "text_decoder.model.layers.31.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
781
+ "text_decoder.model.layers.31.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
782
+ "text_decoder.model.layers.31.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
783
+ "text_decoder.model.layers.31.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
784
+ "text_decoder.model.layers.31.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
785
+ "text_decoder.model.layers.31.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
786
+ "text_decoder.model.layers.31.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
787
+ "text_decoder.model.layers.31.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
788
+ "text_decoder.model.layers.31.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
789
+ "text_decoder.model.layers.31.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
790
+ "text_decoder.model.layers.32.input_layernorm.weight": "model-00004-of-00005.safetensors",
791
+ "text_decoder.model.layers.32.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
792
+ "text_decoder.model.layers.32.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
793
+ "text_decoder.model.layers.32.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
794
+ "text_decoder.model.layers.32.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
795
+ "text_decoder.model.layers.32.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
796
+ "text_decoder.model.layers.32.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
797
+ "text_decoder.model.layers.32.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
798
+ "text_decoder.model.layers.32.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
799
+ "text_decoder.model.layers.32.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
800
+ "text_decoder.model.layers.32.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
801
+ "text_decoder.model.layers.33.input_layernorm.weight": "model-00004-of-00005.safetensors",
802
+ "text_decoder.model.layers.33.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
803
+ "text_decoder.model.layers.33.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
804
+ "text_decoder.model.layers.33.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
805
+ "text_decoder.model.layers.33.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
806
+ "text_decoder.model.layers.33.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
807
+ "text_decoder.model.layers.33.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
808
+ "text_decoder.model.layers.33.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
809
+ "text_decoder.model.layers.33.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
810
+ "text_decoder.model.layers.33.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
811
+ "text_decoder.model.layers.33.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
812
+ "text_decoder.model.layers.34.input_layernorm.weight": "model-00004-of-00005.safetensors",
813
+ "text_decoder.model.layers.34.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
814
+ "text_decoder.model.layers.34.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
815
+ "text_decoder.model.layers.34.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
816
+ "text_decoder.model.layers.34.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
817
+ "text_decoder.model.layers.34.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
818
+ "text_decoder.model.layers.34.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
819
+ "text_decoder.model.layers.34.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
820
+ "text_decoder.model.layers.34.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
821
+ "text_decoder.model.layers.34.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
822
+ "text_decoder.model.layers.34.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
823
+ "text_decoder.model.layers.35.input_layernorm.weight": "model-00004-of-00005.safetensors",
824
+ "text_decoder.model.layers.35.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
825
+ "text_decoder.model.layers.35.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
826
+ "text_decoder.model.layers.35.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
827
+ "text_decoder.model.layers.35.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
828
+ "text_decoder.model.layers.35.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
829
+ "text_decoder.model.layers.35.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
830
+ "text_decoder.model.layers.35.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
831
+ "text_decoder.model.layers.35.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
832
+ "text_decoder.model.layers.35.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
833
+ "text_decoder.model.layers.35.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
834
+ "text_decoder.model.layers.36.input_layernorm.weight": "model-00004-of-00005.safetensors",
835
+ "text_decoder.model.layers.36.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
836
+ "text_decoder.model.layers.36.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
837
+ "text_decoder.model.layers.36.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
838
+ "text_decoder.model.layers.36.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
839
+ "text_decoder.model.layers.36.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
840
+ "text_decoder.model.layers.36.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
841
+ "text_decoder.model.layers.36.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
842
+ "text_decoder.model.layers.36.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
843
+ "text_decoder.model.layers.36.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
844
+ "text_decoder.model.layers.36.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
845
+ "text_decoder.model.layers.37.input_layernorm.weight": "model-00004-of-00005.safetensors",
846
+ "text_decoder.model.layers.37.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
847
+ "text_decoder.model.layers.37.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
848
+ "text_decoder.model.layers.37.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
849
+ "text_decoder.model.layers.37.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
850
+ "text_decoder.model.layers.37.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
851
+ "text_decoder.model.layers.37.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
852
+ "text_decoder.model.layers.37.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
853
+ "text_decoder.model.layers.37.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
854
+ "text_decoder.model.layers.37.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
855
+ "text_decoder.model.layers.37.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
856
+ "text_decoder.model.layers.38.input_layernorm.weight": "model-00004-of-00005.safetensors",
857
+ "text_decoder.model.layers.38.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
858
+ "text_decoder.model.layers.38.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
859
+ "text_decoder.model.layers.38.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
860
+ "text_decoder.model.layers.38.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
861
+ "text_decoder.model.layers.38.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
862
+ "text_decoder.model.layers.38.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
863
+ "text_decoder.model.layers.38.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
864
+ "text_decoder.model.layers.38.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
865
+ "text_decoder.model.layers.38.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
866
+ "text_decoder.model.layers.38.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
867
+ "text_decoder.model.layers.39.input_layernorm.weight": "model-00004-of-00005.safetensors",
868
+ "text_decoder.model.layers.39.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
869
+ "text_decoder.model.layers.39.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
870
+ "text_decoder.model.layers.39.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
871
+ "text_decoder.model.layers.39.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
872
+ "text_decoder.model.layers.39.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
873
+ "text_decoder.model.layers.39.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
874
+ "text_decoder.model.layers.39.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
875
+ "text_decoder.model.layers.39.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
876
+ "text_decoder.model.layers.39.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
877
+ "text_decoder.model.layers.39.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
878
+ "text_decoder.model.layers.4.input_layernorm.weight": "model-00002-of-00005.safetensors",
879
+ "text_decoder.model.layers.4.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
880
+ "text_decoder.model.layers.4.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
881
+ "text_decoder.model.layers.4.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
882
+ "text_decoder.model.layers.4.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
883
+ "text_decoder.model.layers.4.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
884
+ "text_decoder.model.layers.4.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
885
+ "text_decoder.model.layers.4.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
886
+ "text_decoder.model.layers.4.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
887
+ "text_decoder.model.layers.4.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
888
+ "text_decoder.model.layers.4.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
889
+ "text_decoder.model.layers.40.input_layernorm.weight": "model-00004-of-00005.safetensors",
890
+ "text_decoder.model.layers.40.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
891
+ "text_decoder.model.layers.40.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
892
+ "text_decoder.model.layers.40.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
893
+ "text_decoder.model.layers.40.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
894
+ "text_decoder.model.layers.40.post_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
895
+ "text_decoder.model.layers.40.pre_feedforward_layernorm.weight": "model-00004-of-00005.safetensors",
896
+ "text_decoder.model.layers.40.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
897
+ "text_decoder.model.layers.40.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
898
+ "text_decoder.model.layers.40.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
899
+ "text_decoder.model.layers.40.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
900
+ "text_decoder.model.layers.41.input_layernorm.weight": "model-00005-of-00005.safetensors",
901
+ "text_decoder.model.layers.41.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
902
+ "text_decoder.model.layers.41.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
903
+ "text_decoder.model.layers.41.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
904
+ "text_decoder.model.layers.41.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
905
+ "text_decoder.model.layers.41.post_feedforward_layernorm.weight": "model-00005-of-00005.safetensors",
906
+ "text_decoder.model.layers.41.pre_feedforward_layernorm.weight": "model-00005-of-00005.safetensors",
907
+ "text_decoder.model.layers.41.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
908
+ "text_decoder.model.layers.41.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
909
+ "text_decoder.model.layers.41.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
910
+ "text_decoder.model.layers.41.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
911
+ "text_decoder.model.layers.5.input_layernorm.weight": "model-00002-of-00005.safetensors",
912
+ "text_decoder.model.layers.5.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
913
+ "text_decoder.model.layers.5.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
914
+ "text_decoder.model.layers.5.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
915
+ "text_decoder.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
916
+ "text_decoder.model.layers.5.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
917
+ "text_decoder.model.layers.5.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
918
+ "text_decoder.model.layers.5.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
919
+ "text_decoder.model.layers.5.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
920
+ "text_decoder.model.layers.5.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
921
+ "text_decoder.model.layers.5.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
922
+ "text_decoder.model.layers.6.input_layernorm.weight": "model-00002-of-00005.safetensors",
923
+ "text_decoder.model.layers.6.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
924
+ "text_decoder.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
925
+ "text_decoder.model.layers.6.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
926
+ "text_decoder.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
927
+ "text_decoder.model.layers.6.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
928
+ "text_decoder.model.layers.6.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
929
+ "text_decoder.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
930
+ "text_decoder.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
931
+ "text_decoder.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
932
+ "text_decoder.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
933
+ "text_decoder.model.layers.7.input_layernorm.weight": "model-00002-of-00005.safetensors",
934
+ "text_decoder.model.layers.7.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
935
+ "text_decoder.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
936
+ "text_decoder.model.layers.7.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
937
+ "text_decoder.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
938
+ "text_decoder.model.layers.7.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
939
+ "text_decoder.model.layers.7.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
940
+ "text_decoder.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
941
+ "text_decoder.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
942
+ "text_decoder.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
943
+ "text_decoder.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
944
+ "text_decoder.model.layers.8.input_layernorm.weight": "model-00002-of-00005.safetensors",
945
+ "text_decoder.model.layers.8.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
946
+ "text_decoder.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
947
+ "text_decoder.model.layers.8.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
948
+ "text_decoder.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
949
+ "text_decoder.model.layers.8.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
950
+ "text_decoder.model.layers.8.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
951
+ "text_decoder.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
952
+ "text_decoder.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
953
+ "text_decoder.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
954
+ "text_decoder.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
955
+ "text_decoder.model.layers.9.input_layernorm.weight": "model-00002-of-00005.safetensors",
956
+ "text_decoder.model.layers.9.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
957
+ "text_decoder.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
958
+ "text_decoder.model.layers.9.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
959
+ "text_decoder.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
960
+ "text_decoder.model.layers.9.post_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
961
+ "text_decoder.model.layers.9.pre_feedforward_layernorm.weight": "model-00002-of-00005.safetensors",
962
+ "text_decoder.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
963
+ "text_decoder.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
964
+ "text_decoder.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
965
+ "text_decoder.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
966
+ "text_decoder.model.norm.weight": "model-00005-of-00005.safetensors"
967
+ }
968
+ }
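The weight_map above is what from_pretrained uses to pull each tensor from the correct shard. A minimal sketch of resolving the shards by hand, assuming only the standard json and safetensors APIs and the shard file names listed above:

import json
from safetensors.torch import load_file

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# weight_map: tensor name -> shard file, e.g.
# "text_decoder.model.norm.weight" -> "model-00005-of-00005.safetensors"
shard_files = sorted(set(index["weight_map"].values()))

state_dict = {}
for shard in shard_files:
    state_dict.update(load_file(shard))  # each shard contributes a disjoint subset of tensors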
modeling_meralion.py ADDED
@@ -0,0 +1,571 @@
1
+ """PyTorch MERaLiON AudioLLM model."""
2
+
3
+ from dataclasses import dataclass
4
+ from typing import List, Optional, Tuple, Union
5
+
6
+ import torch
7
+ import torch.utils.checkpoint
8
+ from torch import nn
9
+
10
+ from transformers import Gemma2ForCausalLM
11
+ from transformers.models.whisper.modeling_whisper import WhisperEncoder
12
+ from transformers.cache_utils import HybridCache
13
+ from transformers.generation import GenerationMixin
14
+ from transformers.modeling_outputs import ModelOutput
15
+ from transformers.modeling_utils import PreTrainedModel
16
+ from transformers.utils import (
17
+ add_start_docstrings,
18
+ add_start_docstrings_to_model_forward,
19
+ logging,
20
+ replace_return_docstrings,
21
+ )
22
+
23
+ from .configuration_meralion import MERaLiONConfig
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ _CONFIG_FOR_DOC = "MERaLiONConfig"
29
+
30
+
31
+ # Copied from transformers.models.llama.modeling_llama._prepare_4d_causal_attention_mask_with_cache_position
32
+ def _prepare_4d_causal_attention_mask_with_cache_position(
33
+ attention_mask: torch.Tensor,
34
+ sequence_length: int,
35
+ target_length: int,
36
+ dtype: torch.dtype,
37
+ device: torch.device,
38
+ min_dtype: float,
39
+ cache_position: torch.Tensor,
40
+ batch_size: int,
41
+ ):
42
+ """
43
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
44
+ `(batch_size, key_value_length)`; if the input `attention_mask` is already 4D, it is passed through unchanged.
45
+
46
+ Args:
47
+ attention_mask (`torch.Tensor`):
48
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
49
+ sequence_length (`int`):
50
+ The sequence length being processed.
51
+ target_length (`int`):
52
+ The target length: when generating with a static cache, the mask should be as long as the static cache to account for the zero padding, i.e. the part of the cache that is not yet filled.
53
+ dtype (`torch.dtype`):
54
+ The dtype to use for the 4D attention mask.
55
+ device (`torch.device`):
56
+ The device to place the 4D attention mask on.
57
+ min_dtype (`float`):
58
+ The minimum value representable with the dtype `dtype`.
59
+ cache_position (`torch.Tensor`):
60
+ Indices depicting the position of the input sequence tokens in the sequence.
61
+ batch_size (`int`):
62
+ Batch size.
63
+ """
64
+ if attention_mask is not None and attention_mask.dim() == 4:
65
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
66
+ causal_mask = attention_mask
67
+ else:
68
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
69
+ if sequence_length != 1:
70
+ causal_mask = torch.triu(causal_mask, diagonal=1)
71
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
72
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
73
+ if attention_mask is not None:
74
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
75
+ mask_length = attention_mask.shape[-1]
76
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
77
+ padding_mask = padding_mask == 0
78
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
79
+ padding_mask, min_dtype
80
+ )
81
+ return causal_mask
82
+
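For orientation, a minimal sketch of this helper on a toy batch (sizes are illustrative): the returned mask is additive, holding min_dtype at future positions and at padded key columns, and 0 elsewhere.

import torch

attn = torch.tensor([[1, 1, 1, 0]])  # last position is padding
mask = _prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask=attn,
    sequence_length=4,
    target_length=4,
    dtype=torch.float32,
    device=torch.device("cpu"),
    min_dtype=torch.finfo(torch.float32).min,
    cache_position=torch.arange(4),
    batch_size=1,
)
# mask.shape == (1, 1, 4, 4); row i allows keys 0..i, and the padded
# key column (index 3) is masked out for every query position.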
83
+
84
+ # copied from Qwen2AudioCausalLMOutputWithPast
85
+ @dataclass
86
+ class MERaLiONOutputWithPast(ModelOutput):
87
+ """
88
+ Base class for MERaLiON causal language model (or autoregressive) outputs.
89
+
90
+ Args:
91
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
92
+ Language modeling loss (for next-token prediction).
93
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
94
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
95
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
96
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
97
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`
98
+
99
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
100
+ `past_key_values` input) to speed up sequential decoding.
101
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
102
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
103
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
104
+
105
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
106
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
107
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
108
+ sequence_length)`.
109
+
110
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
111
+ heads.
112
+ attention_mask (`torch.FloatTensor`, *optional*):
113
+ Attention mask, used to update the attention mask and position_ids.
114
+ """
115
+
116
+ loss: Optional[torch.FloatTensor] = None
117
+ logits: torch.FloatTensor = None
118
+ past_key_values: Optional[List[torch.FloatTensor]] = None
119
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
120
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
121
+ attention_mask: Optional[torch.FloatTensor] = None
122
+
123
+
124
+ MERALION_START_DOCSTRING = r"""
125
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
126
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
127
+ etc.)
128
+
129
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
130
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
131
+ and behavior.
132
+
133
+ Parameters:
134
+ config ([`MERaLiONConfig`]):
135
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
136
+ load the weights associated with the model, only the configuration. Check out the
137
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
138
+ """
139
+
140
+
141
+ @add_start_docstrings(
142
+ "The bare MERaLiON Model outputting raw hidden-states without any specific head on top.",
143
+ MERALION_START_DOCSTRING,
144
+ )
145
+ class MERaLiONPreTrainedModel(PreTrainedModel):
146
+ config_class = MERaLiONConfig
147
+ base_model_prefix = "model"
148
+ supports_gradient_checkpointing = True
149
+ _no_split_modules = ["WhisperEncoderLayer", "WhisperDecoderLayer", "Gemma2DecoderLayer"]
150
+ _supports_flash_attn_2 = True
151
+ _supports_sdpa = True
152
+ _supports_cache_class = True
153
+ _supports_static_cache = True
154
+
155
+ def _init_weights(self, module):
156
+ # important: this ported version of Qwen2Audio isn't meant for training from scratch - only
157
+ # inference and fine-tuning - so the proper init weights code has been removed
158
+ std = self.config.init_std if hasattr(self.config, "init_std") else self.config.speech_config.init_std
159
+
160
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
161
+ module.weight.data.normal_(mean=0.0, std=std)
162
+ if module.bias is not None:
163
+ module.bias.data.zero_()
164
+ elif isinstance(module, nn.Embedding):
165
+ module.weight.data.normal_(mean=0.0, std=std)
166
+ if module.padding_idx is not None:
167
+ module.weight.data[module.padding_idx].zero_()
168
+
169
+ @property
170
+ def _supports_sdpa(self):
171
+ """
172
+ Retrieve language_model's attribute to check whether the model supports
173
+ SDPA or not.
174
+ """
175
+ return self.text_decoder._supports_sdpa
176
+
177
+ class MERaLiONSpeechAudioAdaper(nn.Module):
178
+ def __init__(
179
+ self,
180
+ config,
181
+ **kwargs
182
+ ):
183
+ super().__init__()
184
+ speech_audio_encoder_output_dim = config.speech_config.d_model
185
+ llm_input_hidden_size = config.text_config.hidden_size
186
+ speech_mlp_scale_factor = config.speech_mlp_scale_factor
187
+
188
+ self.speech_mlp_scale_factor = speech_mlp_scale_factor
189
+ self.mlp_adapter = nn.Sequential(
190
+ nn.Linear(
191
+ in_features=speech_audio_encoder_output_dim * speech_mlp_scale_factor,
192
+ out_features=speech_audio_encoder_output_dim
193
+ ),
194
+ nn.SiLU(),
195
+ nn.Dropout(0.1),
196
+ )
197
+
198
+ self.speech_llm_proj = nn.Sequential(
199
+ nn.Linear(
200
+ speech_audio_encoder_output_dim,
201
+ speech_audio_encoder_output_dim * 4
202
+ ),
203
+ nn.SiLU(),
204
+ nn.Dropout(0.1),
205
+
206
+ nn.Linear(
207
+ speech_audio_encoder_output_dim * 4,
208
+ llm_input_hidden_size
209
+ ),
210
+ )
211
+
212
+ def forward(self, speech_embeds, **kwargs):
213
+ B, T, C = speech_embeds.shape
214
+ speech_embeds = self.mlp_adapter(
215
+ speech_embeds.reshape(
216
+ B,
217
+ T // self.speech_mlp_scale_factor,
218
+ C * self.speech_mlp_scale_factor,
219
+ )
220
+ )
221
+ return self.speech_llm_proj(speech_embeds)
222
+
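The adapter folds speech_mlp_scale_factor consecutive encoder frames into one vector before projecting to the decoder width, so the audio sequence seen by the LLM is shorter by that factor. A shape check with illustrative sizes (the real values come from MERaLiONConfig; d_model=1280, hidden_size=3584, and a scale factor of 4 are assumptions here, and T must be divisible by the factor):

from types import SimpleNamespace
import torch

cfg = SimpleNamespace(
    speech_config=SimpleNamespace(d_model=1280),
    text_config=SimpleNamespace(hidden_size=3584),
    speech_mlp_scale_factor=4,
)
adapter = MERaLiONSpeechAudioAdaper(cfg)
x = torch.randn(2, 100, 1280)  # (B, T, C) speech encoder outputs
print(adapter(x).shape)        # torch.Size([2, 25, 3584])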
223
+
224
+ class MERaLiONSpeechAudioAdaperLarge(nn.Module):
225
+ def __init__(
226
+ self,
227
+ config,
228
+ **kwargs
229
+ ):
230
+ super().__init__()
231
+ speech_audio_encoder_output_dim = config.speech_config.d_model
232
+ llm_input_hidden_size = config.text_config.hidden_size
233
+ speech_mlp_scale_factor = config.speech_mlp_scale_factor
234
+
235
+ self.speech_mlp_scale_factor = speech_mlp_scale_factor
236
+ self.mlp_adapter = nn.Sequential(
237
+ nn.Linear(
238
+ in_features=speech_audio_encoder_output_dim * speech_mlp_scale_factor,
239
+ out_features=speech_audio_encoder_output_dim * 5,
240
+ ),
241
+ nn.SiLU(),
242
+ nn.Dropout(0.01),
243
+ )
244
+
245
+ self.gate_proj = nn.Linear(
246
+ in_features=speech_audio_encoder_output_dim * 5,
247
+ out_features=speech_audio_encoder_output_dim * 5,
248
+ )
249
+
250
+ self.pool_proj = nn.Linear(
251
+ in_features=speech_audio_encoder_output_dim * 5,
252
+ out_features=speech_audio_encoder_output_dim * 5,
253
+ )
254
+ self.act_fn = nn.SiLU()
255
+ self.out_proj = nn.Linear(
256
+ speech_audio_encoder_output_dim * 5,
257
+ llm_input_hidden_size,
258
+ )
259
+
260
+
261
+ def forward(self, speech_embeds, **kwargs):
262
+ B, T, C = speech_embeds.shape
263
+ speech_embeds = self.mlp_adapter(
264
+ speech_embeds.reshape(
265
+ B,
266
+ T // self.speech_mlp_scale_factor,
267
+ C * self.speech_mlp_scale_factor,
268
+ )
269
+ )
270
+ speech_embeds = self.act_fn(self.gate_proj(speech_embeds)) * self.pool_proj(speech_embeds)
271
+ speech_embeds = self.out_proj(speech_embeds)
272
+ return speech_embeds
273
+
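Compared with the adapter above, the Large variant expands to five times the encoder width and applies a SwiGLU-style elementwise gate, SiLU(gate_proj(h)) * pool_proj(h), before out_proj maps down to the decoder width. The gating step in isolation (dimensions illustrative, matching an assumed d_model of 1280):

import torch
from torch import nn

h = torch.randn(2, 25, 6400)  # mlp_adapter output, 5 * d_model
gate_proj = nn.Linear(6400, 6400)
pool_proj = nn.Linear(6400, 6400)
gated = nn.SiLU()(gate_proj(h)) * pool_proj(h)  # elementwise gate, same shape as h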
274
+
275
+ MERALION_INPUTS_DOCSTRING = r"""
276
+ Args:
277
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
278
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
279
+ it.
280
+
281
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
282
+ [`PreTrainedTokenizer.__call__`] for details.
283
+
284
+ [What are input IDs?](../glossary#input-ids)
285
+ input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, feature_sequence_length)`, *optional*):
286
+ Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
287
+ loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
288
+ the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
289
+ [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
290
+ tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
291
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
292
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
293
+
294
+ - 1 for tokens that are **not masked**,
295
+ - 0 for tokens that are **masked**.
296
+
297
+ [What are attention masks?](../glossary#attention-mask)
298
+
299
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
300
+ [`PreTrainedTokenizer.__call__`] for details.
301
+
302
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
303
+ `past_key_values`).
304
+
305
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
306
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
307
+ information on the default strategy.
308
+
309
+ - 1 indicates the token is **not masked**,
310
+ - 0 indicates the token is **masked**.
311
+ feature_attention_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`, *optional*):
312
+ Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
313
+
314
+ - 1 for tokens that are **not masked**,
315
+ - 0 for tokens that are **masked**.
316
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
317
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
318
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
319
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
320
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
321
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
322
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
323
+
324
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
325
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
326
+
327
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
328
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
329
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
330
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
331
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
332
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
333
+ model's internal embedding lookup matrix.
334
+ use_cache (`bool`, *optional*):
335
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
336
+ `past_key_values`).
337
+ output_attentions (`bool`, *optional*):
338
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
339
+ tensors for more detail.
340
+ output_hidden_states (`bool`, *optional*):
341
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
342
+ more detail.
343
+ return_dict (`bool`, *optional*):
344
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
345
+ """
346
+
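Given the auto_map entry in config.json, this class is reachable through AutoModelForSpeechSeq2Seq with trust_remote_code=True. A hypothetical end-to-end sketch; the repo id, prompt string, and the text/audios keyword names are illustrative assumptions rather than the repository's documented API:

import librosa
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq

repo_id = "org/meralion-model"  # placeholder; substitute the actual repository id
processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForSpeechSeq2Seq.from_pretrained(repo_id, trust_remote_code=True)

audio, _ = librosa.load("sample.wav", sr=16000)  # Whisper mel features expect 16 kHz
inputs = processor(text="Please transcribe this speech.", audios=audio, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])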
347
+ @add_start_docstrings(
348
+ """The MERALION model which consists of a audio backbone and a language model.""",
349
+ MERALION_START_DOCSTRING,
350
+ )
351
+ class MERaLiONForConditionalGeneration(MERaLiONPreTrainedModel, GenerationMixin):
352
+ def __init__(self, config: MERaLiONConfig):
353
+ config.text_config._attn_implementation = config._attn_implementation
354
+ config.speech_config._attn_implementation = config._attn_implementation
355
+
356
+ super().__init__(config)
357
+
358
+ self.speech_encoder = WhisperEncoder(config.speech_config)
359
+ # self.speech_encoder = AutoModel.from_config(config.audio_config, attn_implementation=config._attn_implementation)
360
+
361
+ self.ln_speech = nn.LayerNorm(config.speech_config.d_model)
362
+ self.speech_audio_adapter = MERaLiONSpeechAudioAdaperLarge(config)
363
+ self.vocab_size = config.text_config.vocab_size
364
+ self.text_decoder = Gemma2ForCausalLM(config.text_config)
365
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
366
+ self._padding_side = "left"  # defaults to "left"; use the setter to change the padding side
367
+ self.post_init()
368
+
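The constructor wires four submodules: a Whisper encoder, a LayerNorm over the speech features, the Large adapter, and a Gemma2 decoder. A quick layout check, assuming config is an instantiated MERaLiONConfig:

model = MERaLiONForConditionalGeneration(config)
for name in ("speech_encoder", "ln_speech", "speech_audio_adapter", "text_decoder"):
    print(name, "->", type(getattr(model, name)).__name__)
# speech_encoder -> WhisperEncoder, ln_speech -> LayerNorm,
# speech_audio_adapter -> MERaLiONSpeechAudioAdaperLarge, text_decoder -> Gemma2ForCausalLM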
369
+ @property
370
+ def padding_side(self):
371
+ return self._padding_side
372
+
373
+ @padding_side.setter
374
+ def padding_side(self, padding_side: str):
375
+ if padding_side not in ["left", "right"]:
376
+ raise ValueError(f"{padding_side} is not `left` or `right`.")
377
+ self._padding_side = padding_side
378
+
+     # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_input_embeddings
+     def get_input_embeddings(self):
+         return self.text_decoder.get_input_embeddings()
+
+     # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_input_embeddings
+     def set_input_embeddings(self, value):
+         self.text_decoder.set_input_embeddings(value)
+
+     # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_output_embeddings
+     def get_output_embeddings(self):
+         return self.text_decoder.get_output_embeddings()
+
+     # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_output_embeddings
+     def set_output_embeddings(self, new_embeddings):
+         self.text_decoder.set_output_embeddings(new_embeddings)
+
+     # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_decoder
+     def set_decoder(self, decoder):
+         self.text_decoder.set_decoder(decoder)
+
+     # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_decoder
+     def get_decoder(self):
+         return self.text_decoder.get_decoder()
+
+     # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.tie_weights
+     def tie_weights(self):
+         return self.text_decoder.tie_weights()
+
+     # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.resize_token_embeddings
+     def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
+         model_embeds = self.text_decoder.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+         # update vocab size
+         self.config.text_config.vocab_size = model_embeds.num_embeddings
+         self.vocab_size = model_embeds.num_embeddings
+         return model_embeds
+
+     @add_start_docstrings_to_model_forward(MERALION_INPUTS_DOCSTRING)
+     @replace_return_docstrings(output_type=MERaLiONOutputWithPast, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         input_features: torch.FloatTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         feature_attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, MERaLiONOutputWithPast]:
+         r"""
+         Args:
+             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                 (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         Returns:
+         """
+
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         speech_encoder_device = self.speech_encoder.device
+
+         if input_features is not None:
+             input_features = input_features.to(speech_encoder_device)
+             feature_attention_mask = feature_attention_mask.to(speech_encoder_device)
+
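+         # Splice audio into the prompt only on the prefill pass, when raw
+         # `input_features` are available; later cached decoding steps carry no
+         # audio features and fall through to the text decoder with token ids.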
+         if inputs_embeds is None and input_features is not None:
+             speech_contexts_embeds = self.speech_encoder(input_features, attention_mask=feature_attention_mask).last_hidden_state
+             speech_contexts_embeds = self.ln_speech(speech_contexts_embeds)
+             speech_audio_contexts_embeds = self.speech_audio_adapter(speech_contexts_embeds)
+
+             inputs_embeds = self.text_decoder.base_model.embed_tokens(input_ids)
+
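+             # Every occurrence of the special speech placeholder token in
+             # `input_ids` is overwritten, position by position, with the
+             # adapter's audio embeddings.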
+             speech_mask = (input_ids == self.config.speech_token_index).unsqueeze(-1)
+             speech_mask = speech_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
+
+             inputs_embeds = inputs_embeds.masked_scatter(speech_mask, speech_audio_contexts_embeds)
+
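+             # The decoder accepts either token ids or embeddings, not both, so
+             # drop the ids now that they have been converted.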
+             input_ids = None
+
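+         # Everything is delegated to the Gemma2 decoder, which also computes the
+         # causal-LM loss whenever `labels` are provided.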
+         outputs = self.text_decoder(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             labels=labels,
+         )
+
+         return outputs
+
+     # Adapted from transformers.models.gemma2.modeling_gemma2.Gemma2ForCausalLM.prepare_inputs_for_generation
+     def prepare_inputs_for_generation(
+         self,
+         input_ids,
+         attention_mask=None,
+         input_features=None,
+         feature_attention_mask=None,
+         past_key_values=None,
+         inputs_embeds=None,
+         cache_position=None,
+         position_ids=None,
+         use_cache=None,
+         **kwargs,
+     ):
+         # If we have a cache, slice `input_ids` through `cache_position` to keep only the unprocessed tokens
+         # Exception 1: when passing `inputs_embeds`, `input_ids` may be missing entries
+         # Exception 2: some generation methods do special slicing of `input_ids`, so we don't need to do it here
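+         # `cache_position` starts at 0 only on the very first (prefill) call of a
+         # generation loop; that is the only step that still carries audio features.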
+         is_first_step = cache_position[0].item() == 0
+         if past_key_values is not None:
+             if inputs_embeds is not None:  # Exception 1
+                 input_ids = input_ids[:, -cache_position.shape[0] :]
+             elif input_ids.shape[1] != cache_position.shape[0]:  # Default case (the "else", a no-op, is Exception 2)
+                 input_ids = input_ids[:, cache_position]
+
+         if attention_mask is not None and position_ids is None:
+             # create position_ids on the fly for batch generation
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(attention_mask == 0, 1)
+             if past_key_values:
+                 position_ids = position_ids[:, -input_ids.shape[1] :]
+                 # This `clone` call is needed to avoid recapturing CUDA graphs with `torch.compile`'s
+                 # `mode="reduce-overhead"`, as otherwise the input `position_ids` would have a varying stride
+                 # during decoding. Here, simply using `.contiguous()` is not sufficient, as in the
+                 # batch size = 1 case `position_ids` is already contiguous but with varying stride,
+                 # which retriggers a capture.
+                 position_ids = position_ids.clone(memory_format=torch.contiguous_format)
+
+         # If `inputs_embeds` are passed, we only want to use them in the first generation step.
+         if inputs_embeds is not None and is_first_step:
+             model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
+         else:
+             # The clone here is for the same reason as for `position_ids`.
+             model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}
+
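+         # Gemma2's HybridCache uses static-shaped key/value buffers, so for
+         # non-flash-attention kernels the 2D padding mask must be expanded into a
+         # 4D causal mask that matches the cache's maximum length.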
+         if (
+             isinstance(past_key_values, HybridCache)
+             and attention_mask.ndim == 2
+             and self.config._attn_implementation != "flash_attention_2"
+         ):
+             if model_inputs["inputs_embeds"] is not None:
+                 batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
+                 device = model_inputs["inputs_embeds"].device
+             else:
+                 batch_size, sequence_length = model_inputs["input_ids"].shape
+                 device = model_inputs["input_ids"].device
+             dtype = self.text_decoder.lm_head.weight.dtype
+             min_dtype = torch.finfo(dtype).min
+             attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(
+                 attention_mask,
+                 sequence_length=sequence_length,
+                 target_length=past_key_values.get_max_cache_shape(),
+                 dtype=dtype,
+                 device=device,
+                 min_dtype=min_dtype,
+                 cache_position=cache_position,
+                 batch_size=batch_size,
+             )
+
+         model_inputs.update(
+             {
+                 "attention_mask": attention_mask,
+                 "position_ids": position_ids,
+                 "cache_position": cache_position,
+                 "past_key_values": past_key_values,
+                 "use_cache": use_cache,
+             }
+         )
+
+
+         # Audio features are only needed on the first (prefill) step; later steps
+         # decode from the key/value cache.
+         if is_first_step:
+             model_inputs["input_features"] = input_features
+             model_inputs["feature_attention_mask"] = feature_attention_mask
+
+         return model_inputs
+
+     def _reorder_cache(self, *args, **kwargs):
+         return self.text_decoder._reorder_cache(*args, **kwargs)
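+
+ # Illustrative usage sketch (not part of the committed code): the repo id and the
+ # processor call signature below are assumptions, modelled on similar speech LLMs;
+ # the `AutoModelForSpeechSeq2Seq` mapping and bfloat16 weights come from config.json.
+ #
+ #     from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
+ #
+ #     repo_id = "MERaLiON/MERaLiON-2"  # hypothetical repo id
+ #     processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)
+ #     model = AutoModelForSpeechSeq2Seq.from_pretrained(
+ #         repo_id, trust_remote_code=True, torch_dtype="bfloat16"
+ #     )
+ #     inputs = processor(text=prompt, audios=audio_array)  # assumed kwargs
+ #     output_ids = model.generate(**inputs, max_new_tokens=128)
+ #     print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])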