Hellina committed
Commit 5c2c2fc · verified · 1 parent: dd50c7d

Model save

README.md ADDED
@@ -0,0 +1,73 @@
+ ---
+ license: apache-2.0
+ base_model: facebook/wav2vec2-xls-r-300m
+ tags:
+ - generated_from_trainer
+ metrics:
+ - wer
+ model-index:
+ - name: fr_only
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # fr_only
+
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.1454
+ - WER: 0.6672
+ - CER: 0.3732
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | WER    | CER    |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|
+ | 4.2678        | 0.8   | 500  | 3.9064          | 1.0    | 1.0    |
+ | 3.8874        | 1.61  | 1000 | 3.8322          | 1.0    | 1.0    |
+ | 3.7809        | 2.41  | 1500 | 3.5061          | 0.9737 | 0.8809 |
+ | 2.9215        | 3.21  | 2000 | 2.4683          | 0.9685 | 0.6577 |
+ | 2.3508        | 4.01  | 2500 | 2.1271          | 0.9144 | 0.6212 |
+ | 2.0152        | 4.82  | 3000 | 1.8754          | 0.8794 | 0.5677 |
+ | 1.6777        | 5.62  | 3500 | 1.5346          | 0.7830 | 0.4904 |
+ | 1.4068        | 6.42  | 4000 | 1.3605          | 0.7419 | 0.4583 |
+ | 1.1826        | 7.22  | 4500 | 1.2643          | 0.6912 | 0.4349 |
+ | 0.9938        | 8.03  | 5000 | 1.1641          | 0.7039 | 0.3977 |
+ | 0.7796        | 8.83  | 5500 | 1.1441          | 0.6934 | 0.3865 |
+ | 0.6479        | 9.63  | 6000 | 1.1454          | 0.6672 | 0.3732 |
+
+
+ ### Framework versions
+
+ - Transformers 4.36.2
+ - Pytorch 1.13.0+cu116
+ - Datasets 2.15.0
+ - Tokenizers 0.15.2
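
The generated card stops short of a usage snippet. Below is a minimal inference sketch for this checkpoint; the repo id `Hellina/fr_only` and the local 16 kHz mono file `sample.wav` are assumptions, not values taken from the card.

```python
import torch
import soundfile as sf
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "Hellina/fr_only"  # assumption: replace with the actual repo id
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)
model.eval()

speech, sampling_rate = sf.read("sample.wav")  # assumed 16 kHz mono audio
inputs = processor(speech, sampling_rate=sampling_rate, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape: (batch, time, vocab_size=236)

predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids)[0])
```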
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "</s>": 235,
+   "<s>": 234
+ }
config.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+   "activation_dropout": 0.0,
+   "adapter_attn_dim": null,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 768,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "wav2vec2",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "output_hidden_size": 1024,
+   "pad_token_id": 233,
+   "proj_codevector_dim": 768,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.2",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 236,
+   "xvector_output_dim": 512
+ }
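
For orientation, the architecture definition above can be loaded and inspected on its own with the `transformers` config API; a small sketch follows, assuming `config.json` has been downloaded to the working directory.

```python
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

# Load only the architecture definition; no weights are read from config.json.
config = Wav2Vec2Config.from_pretrained("./config.json")
print(config.hidden_size, config.num_hidden_layers, config.vocab_size)  # 1024 24 236

# A model built from the config alone is randomly initialised; use
# Wav2Vec2ForCTC.from_pretrained(...) to load the fine-tuned checkpoint instead.
model = Wav2Vec2ForCTC(config)
```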
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8bd0115b2da1e133de57d4e4f42336204e2fee769e8f48fb77797b965e1843a
+ size 1262775032
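
The three lines above are a Git LFS pointer rather than the weights themselves. A small sketch of checking a downloaded `model.safetensors` against the recorded oid (the local path is an assumption):

```python
import hashlib

EXPECTED_OID = "f8bd0115b2da1e133de57d4e4f42336204e2fee769e8f48fb77797b965e1843a"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:            # assumed local download path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED_OID, "file does not match the LFS pointer oid"
print("ok:", h.hexdigest())
```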
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
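
The feature extractor defined above normalises the raw waveform and expects 16 kHz input. A minimal sketch, assuming the repo files are cloned into the current directory; the one-second random waveform is a placeholder only.

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(".")  # local clone assumed
waveform = np.random.randn(16000).astype(np.float32)  # 1 s of placeholder audio at 16 kHz

batch = feature_extractor(waveform, sampling_rate=16000, padding=True, return_tensors="pt")
print(batch.input_values.shape)  # torch.Size([1, 16000]); normalised because do_normalize is true
# An attention_mask is also returned, since return_attention_mask is true in the config.
```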
runs/Aug28_07-59-16_tripel/events.out.tfevents.1724858350.tripel.3944336.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cfd84b2321a860415fce6ad467e181a6867cbfd1d7d837cb363d12879a24987
+ size 12526
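
The event file above holds the TensorBoard scalars logged during training. A sketch of reading them back with the TensorBoard event API; the run directory path is taken from the filename, while the `eval/wer` tag name is an assumption about what the Trainer wrote.

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Aug28_07-59-16_tripel")  # directory containing the event file
ea.Reload()

print(ea.Tags()["scalars"])           # lists the logged scalar tags
for event in ea.Scalars("eval/wer"):  # assumed tag name for the eval WER curve
    print(event.step, event.value)
```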
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "[PAD]",
+   "unk_token": "[UNK]"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "added_tokens_decoder": {
+     "232": {
+       "content": "[UNK]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "233": {
+       "content": "[PAD]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "234": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "235": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "replace_word_delimiter_char": " ",
+   "target_lang": null,
+   "tokenizer_class": "Wav2Vec2CTCTokenizer",
+   "unk_token": "[UNK]",
+   "word_delimiter_token": "|"
+ }
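
The CTC tokenizer configured above encodes text character by character, turns spaces into the `|` word delimiter, and collapses repeats when decoding. A minimal round-trip sketch, assuming the repo files are available in the current directory:

```python
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(".")  # local clone assumed

ids = tokenizer("bonjour le monde").input_ids  # per-character ids; spaces become "|" (id 0)
print(ids)
print(tokenizer.decode(ids))  # expected to round-trip, since no adjacent characters repeat
```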
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46d7188718c990ba6b2347c1e033bd9daf6b0603816044b4fe8840c3aefec74f
+ size 4411
vocab.json ADDED
@@ -0,0 +1,236 @@
+ {
+   "[PAD]": 233,
+   "[UNK]": 232,
+   "a": 1,
+   "b": 2,
+   "c": 3,
+   "d": 4,
+   "e": 5,
+   "f": 6,
+   "g": 7,
+   "h": 8,
+   "i": 9,
+   "j": 10,
+   "k": 11,
+   "l": 12,
+   "m": 13,
+   "n": 14,
+   "o": 15,
+   "p": 16,
+   "q": 17,
+   "r": 18,
+   "s": 19,
+   "t": 20,
+   "u": 21,
+   "v": 22,
+   "w": 23,
+   "x": 24,
+   "y": 25,
+   "z": 26,
+   "|": 0,
+   "î": 27,
+   "œ": 28,
+   "ሀ": 29,
+   "ሁ": 30,
+   "ሂ": 31,
+   "ሃ": 32,
+   "ሄ": 33,
+   "ህ": 34,
+   "ሆ": 35,
+   "ለ": 36,
+   "ሉ": 37,
+   "ሊ": 38,
+   "ላ": 39,
+   "ሌ": 40,
+   "ል": 41,
+   "ሎ": 42,
+   "ሏ": 43,
+   "ሐ": 44,
+   "ሕ": 45,
+   "መ": 46,
+   "ሙ": 47,
+   "ሚ": 48,
+   "ማ": 49,
+   "ሜ": 50,
+   "ም": 51,
+   "ሞ": 52,
+   "ሟ": 53,
+   "ሠ": 54,
+   "ሡ": 55,
+   "ሣ": 56,
+   "ሥ": 57,
+   "ሦ": 58,
+   "ረ": 59,
+   "ሩ": 60,
+   "ሪ": 61,
+   "ራ": 62,
+   "ሬ": 63,
+   "ር": 64,
+   "ሮ": 65,
+   "ሯ": 66,
+   "ሰ": 67,
+   "ሱ": 68,
+   "ሲ": 69,
+   "ሳ": 70,
+   "ሴ": 71,
+   "ስ": 72,
+   "ሶ": 73,
+   "ሷ": 74,
+   "ሸ": 75,
+   "ሹ": 76,
+   "ሺ": 77,
+   "ሻ": 78,
+   "ሼ": 79,
+   "ሽ": 80,
+   "ሾ": 81,
+   "ቀ": 82,
+   "ቁ": 83,
+   "ቂ": 84,
+   "ቃ": 85,
+   "ቄ": 86,
+   "ቅ": 87,
+   "ቆ": 88,
+   "በ": 89,
+   "ቡ": 90,
+   "ቢ": 91,
+   "ባ": 92,
+   "ቤ": 93,
+   "ብ": 94,
+   "ቦ": 95,
+   "ቧ": 96,
+   "ቨ": 97,
+   "ቪ": 98,
+   "ቫ": 99,
+   "ቬ": 100,
+   "ቭ": 101,
+   "ተ": 102,
+   "ቱ": 103,
+   "ቲ": 104,
+   "ታ": 105,
+   "ቴ": 106,
+   "ት": 107,
+   "ቶ": 108,
+   "ቷ": 109,
+   "ቸ": 110,
+   "ቹ": 111,
+   "ቺ": 112,
+   "ቻ": 113,
+   "ቼ": 114,
+   "ች": 115,
+   "ቾ": 116,
+   "ኅ": 117,
+   "ኋ": 118,
+   "ነ": 119,
+   "ኑ": 120,
+   "ኒ": 121,
+   "ና": 122,
+   "ኔ": 123,
+   "ን": 124,
+   "ኖ": 125,
+   "ኗ": 126,
+   "ኘ": 127,
+   "ኚ": 128,
+   "ኛ": 129,
+   "ኝ": 130,
+   "ኞ": 131,
+   "አ": 132,
+   "ኡ": 133,
+   "ኢ": 134,
+   "ኤ": 135,
+   "እ": 136,
+   "ኦ": 137,
+   "ኧ": 138,
+   "ከ": 139,
+   "ኪ": 140,
+   "ካ": 141,
+   "ኬ": 142,
+   "ክ": 143,
+   "ኮ": 144,
+   "ወ": 145,
+   "ዊ": 146,
+   "ዋ": 147,
+   "ዌ": 148,
+   "ው": 149,
+   "ዎ": 150,
+   "ዑ": 151,
+   "ዓ": 152,
+   "ዔ": 153,
+   "ዕ": 154,
+   "ዖ": 155,
+   "ዘ": 156,
+   "ዙ": 157,
+   "ዚ": 158,
+   "ዛ": 159,
+   "ዜ": 160,
+   "ዝ": 161,
+   "ዞ": 162,
+   "ዥ": 163,
+   "የ": 164,
+   "ያ": 165,
+   "ዬ": 166,
+   "ይ": 167,
+   "ዮ": 168,
+   "ደ": 169,
+   "ዱ": 170,
+   "ዲ": 171,
+   "ዳ": 172,
+   "ዴ": 173,
+   "ድ": 174,
+   "ዶ": 175,
+   "ዷ": 176,
+   "ጀ": 177,
+   "ጁ": 178,
+   "ጂ": 179,
+   "ጃ": 180,
+   "ጄ": 181,
+   "ጅ": 182,
+   "ጆ": 183,
+   "ጇ": 184,
+   "ገ": 185,
+   "ጉ": 186,
+   "ጊ": 187,
+   "ጋ": 188,
+   "ጌ": 189,
+   "ግ": 190,
+   "ጎ": 191,
+   "ጐ": 192,
+   "ጓ": 193,
+   "ጠ": 194,
+   "ጡ": 195,
+   "ጢ": 196,
+   "ጣ": 197,
+   "ጤ": 198,
+   "ጥ": 199,
+   "ጦ": 200,
+   "ጧ": 201,
+   "ጨ": 202,
+   "ጩ": 203,
+   "ጫ": 204,
+   "ጭ": 205,
+   "ጮ": 206,
+   "ጳ": 207,
+   "ጴ": 208,
+   "ጶ": 209,
+   "ጻ": 210,
+   "ጼ": 211,
+   "ጽ": 212,
+   "ፀ": 213,
+   "ፁ": 214,
+   "ፃ": 215,
+   "ፅ": 216,
+   "ፆ": 217,
+   "ፈ": 218,
+   "ፉ": 219,
+   "ፊ": 220,
+   "ፋ": 221,
+   "ፌ": 222,
+   "ፍ": 223,
+   "ፎ": 224,
+   "ፐ": 225,
+   "ፑ": 226,
+   "ፒ": 227,
+   "ፓ": 228,
+   "ፔ": 229,
+   "ፕ": 230,
+   "ፖ": 231
+ }
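
The vocabulary above has 234 entries (`|` as the word delimiter, `a`-`z`, `î`, `œ`, the Ethiopic syllabary, `[UNK]`, `[PAD]`); with the `<s>`/`</s>` tokens from added_tokens.json it reaches the `vocab_size` of 236 recorded in config.json. A sketch of loading it directly, assuming `vocab.json` sits in the working directory:

```python
import json
from transformers import Wav2Vec2CTCTokenizer

with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)
print(len(vocab))  # 234 base entries; 236 in total once <s> and </s> are added

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
)
print(tokenizer.convert_ids_to_tokens([67, 39, 51]))  # ['ሰ', 'ላ', 'ም']
```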