mouseyy committed on
Commit
d2fd298
·
verified ·
1 Parent(s): 8146c9a

Training in progress, step 20

Browse files
README.md ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: apache-2.0
4
+ base_model: facebook/wav2vec2-xls-r-300m
5
+ tags:
6
+ - generated_from_trainer
7
+ datasets:
8
+ - common_voice_17_0
9
+ metrics:
10
+ - wer
11
+ model-index:
12
+ - name: result_data-1
13
+ results:
14
+ - task:
15
+ name: Automatic Speech Recognition
16
+ type: automatic-speech-recognition
17
+ dataset:
18
+ name: common_voice_17_0
19
+ type: common_voice_17_0
20
+ config: uk
21
+ split: test
22
+ args: uk
23
+ metrics:
24
+ - name: Wer
25
+ type: wer
26
+ value: 0.36512878573450325
27
+ ---
28
+
29
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
30
+ should probably proofread and complete it, then remove this comment. -->
31
+
32
+ # result_data-1
33
+
34
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice_17_0 dataset.
35
+ It achieves the following results on the evaluation set:
36
+ - Loss: 0.2220
37
+ - Wer: 0.3651
38
+ - Cer: 0.1691
39
+
40
+ ## Model description
41
+
42
+ More information needed
43
+
44
+ ## Intended uses & limitations
45
+
46
+ More information needed
47
+
48
+ ## Training and evaluation data
49
+
50
+ More information needed
51
+
52
+ ## Training procedure
53
+
54
+ ### Training hyperparameters
55
+
56
+ The following hyperparameters were used during training:
57
+ - learning_rate: 6.532628754904162e-05
58
+ - train_batch_size: 16
59
+ - eval_batch_size: 16
60
+ - seed: 42
61
+ - distributed_type: multi-GPU
62
+ - num_devices: 2
63
+ - total_train_batch_size: 32
64
+ - total_eval_batch_size: 32
65
+ - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
66
+ - lr_scheduler_type: linear
67
+ - lr_scheduler_warmup_steps: 206
68
+ - num_epochs: 7.0
69
+ - mixed_precision_training: Native AMP
70
+
71
+ ### Training results
72
+
73
+ | Training Loss | Epoch | Step | Validation Loss | Wer | Cer |
74
+ |:-------------:|:------:|:----:|:---------------:|:------:|:------:|
75
+ | 0.6324 | 0.9099 | 1000 | 0.5004 | 0.6083 | 0.2381 |
76
+ | 0.3497 | 1.8198 | 2000 | 0.3087 | 0.4650 | 0.1965 |
77
+ | 0.2642 | 2.7298 | 3000 | 0.2636 | 0.4249 | 0.1841 |
78
+ | 0.2328 | 3.6397 | 4000 | 0.2431 | 0.3960 | 0.1789 |
79
+ | 0.1933 | 4.5496 | 5000 | 0.2289 | 0.3773 | 0.1732 |
80
+ | 0.1783 | 5.4595 | 6000 | 0.2300 | 0.3728 | 0.1711 |
81
+ | 0.1617 | 6.3694 | 7000 | 0.2233 | 0.3637 | 0.1700 |
82
+
83
+
84
+ ### Framework versions
85
+
86
+ - Transformers 4.49.0
87
+ - Pytorch 2.6.0+cu124
88
+ - Datasets 3.3.2
89
+ - Tokenizers 0.21.0
added_tokens.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "</s>": 39,
3
+ "<s>": 38
4
+ }
all_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 7.0,
3
+ "eval_cer": 0.16905169176853435,
4
+ "eval_loss": 0.22203268110752106,
5
+ "eval_runtime": 16.4511,
6
+ "eval_samples": 500,
7
+ "eval_samples_per_second": 30.393,
8
+ "eval_steps_per_second": 0.973,
9
+ "eval_wer": 0.36512878573450325,
10
+ "total_flos": 5.670215301911177e+19,
11
+ "train_loss": 0.6279906528631082,
12
+ "train_runtime": 12613.1325,
13
+ "train_samples": 35144,
14
+ "train_samples_per_second": 19.504,
15
+ "train_steps_per_second": 0.61
16
+ }
config.json ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
3
+ "activation_dropout": 0.018610230386620467,
4
+ "adapter_attn_dim": null,
5
+ "adapter_kernel_size": 3,
6
+ "adapter_stride": 2,
7
+ "add_adapter": false,
8
+ "apply_spec_augment": true,
9
+ "architectures": [
10
+ "Wav2Vec2ForCTC"
11
+ ],
12
+ "attention_dropout": 0.032850096694511215,
13
+ "bos_token_id": 1,
14
+ "classifier_proj_size": 256,
15
+ "codevector_dim": 768,
16
+ "contrastive_logits_temperature": 0.1,
17
+ "conv_bias": true,
18
+ "conv_dim": [
19
+ 512,
20
+ 512,
21
+ 512,
22
+ 512,
23
+ 512,
24
+ 512,
25
+ 512
26
+ ],
27
+ "conv_kernel": [
28
+ 10,
29
+ 3,
30
+ 3,
31
+ 3,
32
+ 3,
33
+ 2,
34
+ 2
35
+ ],
36
+ "conv_stride": [
37
+ 5,
38
+ 2,
39
+ 2,
40
+ 2,
41
+ 2,
42
+ 2,
43
+ 2
44
+ ],
45
+ "ctc_loss_reduction": "mean",
46
+ "ctc_zero_infinity": false,
47
+ "diversity_loss_weight": 0.1,
48
+ "do_stable_layer_norm": true,
49
+ "eos_token_id": 2,
50
+ "feat_extract_activation": "gelu",
51
+ "feat_extract_dropout": 0.0,
52
+ "feat_extract_norm": "layer",
53
+ "feat_proj_dropout": 0.07440213561571356,
54
+ "feat_quantizer_dropout": 0.0,
55
+ "final_dropout": 0.0,
56
+ "gradient_checkpointing": false,
57
+ "hidden_act": "gelu",
58
+ "hidden_dropout": 0.031517397410015094,
59
+ "hidden_size": 1024,
60
+ "initializer_range": 0.02,
61
+ "intermediate_size": 4096,
62
+ "layer_norm_eps": 1e-05,
63
+ "layerdrop": 0.018610230386620467,
64
+ "mask_feature_length": 10,
65
+ "mask_feature_min_masks": 0,
66
+ "mask_feature_prob": 0.0,
67
+ "mask_time_length": 10,
68
+ "mask_time_min_masks": 2,
69
+ "mask_time_prob": 0.039995615649618,
70
+ "model_type": "wav2vec2",
71
+ "num_adapter_layers": 3,
72
+ "num_attention_heads": 16,
73
+ "num_codevector_groups": 2,
74
+ "num_codevectors_per_group": 320,
75
+ "num_conv_pos_embedding_groups": 16,
76
+ "num_conv_pos_embeddings": 128,
77
+ "num_feat_extract_layers": 7,
78
+ "num_hidden_layers": 24,
79
+ "num_negatives": 100,
80
+ "output_hidden_size": 1024,
81
+ "pad_token_id": 37,
82
+ "proj_codevector_dim": 768,
83
+ "tdnn_dilation": [
84
+ 1,
85
+ 2,
86
+ 3,
87
+ 1,
88
+ 1
89
+ ],
90
+ "tdnn_dim": [
91
+ 512,
92
+ 512,
93
+ 512,
94
+ 512,
95
+ 1500
96
+ ],
97
+ "tdnn_kernel": [
98
+ 5,
99
+ 3,
100
+ 3,
101
+ 1,
102
+ 1
103
+ ],
104
+ "torch_dtype": "float32",
105
+ "transformers_version": "4.49.0",
106
+ "use_weighted_layer_sum": false,
107
+ "vocab_size": 40,
108
+ "xvector_output_dim": 512
109
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 7.0,
3
+ "eval_cer": 0.16905169176853435,
4
+ "eval_loss": 0.22203268110752106,
5
+ "eval_runtime": 16.4511,
6
+ "eval_samples": 500,
7
+ "eval_samples_per_second": 30.393,
8
+ "eval_steps_per_second": 0.973,
9
+ "eval_wer": 0.36512878573450325
10
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c140e98e471787fea4b8f014514167fcf0836cc54f031c1e5b597f3fc88d79a9
3
+ size 1261971480
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0,
7
+ "processor_class": "Wav2Vec2Processor",
8
+ "return_attention_mask": true,
9
+ "sampling_rate": 16000
10
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": true,
19
+ "normalized": false,
20
+ "rstrip": true,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "[UNK]",
25
+ "lstrip": true,
26
+ "normalized": false,
27
+ "rstrip": true,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "36": {
4
+ "content": "[UNK]",
5
+ "lstrip": true,
6
+ "normalized": false,
7
+ "rstrip": true,
8
+ "single_word": false,
9
+ "special": false
10
+ },
11
+ "37": {
12
+ "content": "[PAD]",
13
+ "lstrip": true,
14
+ "normalized": false,
15
+ "rstrip": true,
16
+ "single_word": false,
17
+ "special": false
18
+ },
19
+ "38": {
20
+ "content": "<s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "39": {
28
+ "content": "</s>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ }
35
+ },
36
+ "bos_token": "<s>",
37
+ "clean_up_tokenization_spaces": false,
38
+ "do_lower_case": false,
39
+ "eos_token": "</s>",
40
+ "extra_special_tokens": {},
41
+ "model_max_length": 1000000000000000019884624838656,
42
+ "pad_token": "[PAD]",
43
+ "processor_class": "Wav2Vec2Processor",
44
+ "replace_word_delimiter_char": " ",
45
+ "target_lang": null,
46
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
47
+ "unk_token": "[UNK]",
48
+ "word_delimiter_token": "|"
49
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 7.0,
3
+ "total_flos": 5.670215301911177e+19,
4
+ "train_loss": 0.6279906528631082,
5
+ "train_runtime": 12613.1325,
6
+ "train_samples": 35144,
7
+ "train_samples_per_second": 19.504,
8
+ "train_steps_per_second": 0.61
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,644 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 7.0,
5
+ "eval_steps": 1000.0,
6
+ "global_step": 7693,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.09099181073703366,
13
+ "grad_norm": 12.070003509521484,
14
+ "learning_rate": 3.17117900723503e-05,
15
+ "loss": 11.3381,
16
+ "step": 100
17
+ },
18
+ {
19
+ "epoch": 0.18198362147406733,
20
+ "grad_norm": 2.9062180519104004,
21
+ "learning_rate": 6.34235801447006e-05,
22
+ "loss": 4.0713,
23
+ "step": 200
24
+ },
25
+ {
26
+ "epoch": 0.272975432211101,
27
+ "grad_norm": 2.260572910308838,
28
+ "learning_rate": 6.45061097702771e-05,
29
+ "loss": 3.2752,
30
+ "step": 300
31
+ },
32
+ {
33
+ "epoch": 0.36396724294813465,
34
+ "grad_norm": 0.8717735409736633,
35
+ "learning_rate": 6.363358021839996e-05,
36
+ "loss": 3.1816,
37
+ "step": 400
38
+ },
39
+ {
40
+ "epoch": 0.4549590536851683,
41
+ "grad_norm": 0.6315112113952637,
42
+ "learning_rate": 6.276105066652282e-05,
43
+ "loss": 3.1526,
44
+ "step": 500
45
+ },
46
+ {
47
+ "epoch": 0.545950864422202,
48
+ "grad_norm": 2.5357179641723633,
49
+ "learning_rate": 6.188852111464567e-05,
50
+ "loss": 2.9969,
51
+ "step": 600
52
+ },
53
+ {
54
+ "epoch": 0.6369426751592356,
55
+ "grad_norm": 1.636403203010559,
56
+ "learning_rate": 6.101599156276853e-05,
57
+ "loss": 1.6074,
58
+ "step": 700
59
+ },
60
+ {
61
+ "epoch": 0.7279344858962693,
62
+ "grad_norm": 1.3069971799850464,
63
+ "learning_rate": 6.014346201089139e-05,
64
+ "loss": 0.9392,
65
+ "step": 800
66
+ },
67
+ {
68
+ "epoch": 0.818926296633303,
69
+ "grad_norm": 1.4789917469024658,
70
+ "learning_rate": 5.927093245901425e-05,
71
+ "loss": 0.7199,
72
+ "step": 900
73
+ },
74
+ {
75
+ "epoch": 0.9099181073703366,
76
+ "grad_norm": 1.1590214967727661,
77
+ "learning_rate": 5.839840290713711e-05,
78
+ "loss": 0.6324,
79
+ "step": 1000
80
+ },
81
+ {
82
+ "epoch": 0.9099181073703366,
83
+ "eval_cer": 0.23809921148149693,
84
+ "eval_loss": 0.5003824830055237,
85
+ "eval_runtime": 16.9015,
86
+ "eval_samples_per_second": 29.583,
87
+ "eval_steps_per_second": 0.947,
88
+ "eval_wer": 0.6082649306538352,
89
+ "step": 1000
90
+ },
91
+ {
92
+ "epoch": 1.0009099181073704,
93
+ "grad_norm": 1.4140914678573608,
94
+ "learning_rate": 5.752587335525997e-05,
95
+ "loss": 0.5548,
96
+ "step": 1100
97
+ },
98
+ {
99
+ "epoch": 1.091901728844404,
100
+ "grad_norm": 2.3637049198150635,
101
+ "learning_rate": 5.6653343803382825e-05,
102
+ "loss": 0.4844,
103
+ "step": 1200
104
+ },
105
+ {
106
+ "epoch": 1.1828935395814377,
107
+ "grad_norm": 1.3760229349136353,
108
+ "learning_rate": 5.578081425150568e-05,
109
+ "loss": 0.466,
110
+ "step": 1300
111
+ },
112
+ {
113
+ "epoch": 1.2738853503184713,
114
+ "grad_norm": 1.446753978729248,
115
+ "learning_rate": 5.490828469962854e-05,
116
+ "loss": 0.4387,
117
+ "step": 1400
118
+ },
119
+ {
120
+ "epoch": 1.364877161055505,
121
+ "grad_norm": 1.2168169021606445,
122
+ "learning_rate": 5.40357551477514e-05,
123
+ "loss": 0.409,
124
+ "step": 1500
125
+ },
126
+ {
127
+ "epoch": 1.4558689717925386,
128
+ "grad_norm": 1.9888843297958374,
129
+ "learning_rate": 5.3163225595874256e-05,
130
+ "loss": 0.3875,
131
+ "step": 1600
132
+ },
133
+ {
134
+ "epoch": 1.5468607825295724,
135
+ "grad_norm": 1.507049560546875,
136
+ "learning_rate": 5.2290696043997114e-05,
137
+ "loss": 0.3851,
138
+ "step": 1700
139
+ },
140
+ {
141
+ "epoch": 1.6378525932666061,
142
+ "grad_norm": 2.9892451763153076,
143
+ "learning_rate": 5.141816649211998e-05,
144
+ "loss": 0.372,
145
+ "step": 1800
146
+ },
147
+ {
148
+ "epoch": 1.7288444040036397,
149
+ "grad_norm": 1.2426109313964844,
150
+ "learning_rate": 5.0545636940242836e-05,
151
+ "loss": 0.3753,
152
+ "step": 1900
153
+ },
154
+ {
155
+ "epoch": 1.8198362147406733,
156
+ "grad_norm": 1.0674372911453247,
157
+ "learning_rate": 4.9673107388365694e-05,
158
+ "loss": 0.3497,
159
+ "step": 2000
160
+ },
161
+ {
162
+ "epoch": 1.8198362147406733,
163
+ "eval_cer": 0.196545537986566,
164
+ "eval_loss": 0.30870166420936584,
165
+ "eval_runtime": 16.7127,
166
+ "eval_samples_per_second": 29.917,
167
+ "eval_steps_per_second": 0.957,
168
+ "eval_wer": 0.4650438720634022,
169
+ "step": 2000
170
+ },
171
+ {
172
+ "epoch": 1.910828025477707,
173
+ "grad_norm": 1.1968690156936646,
174
+ "learning_rate": 4.880057783648855e-05,
175
+ "loss": 0.3393,
176
+ "step": 2100
177
+ },
178
+ {
179
+ "epoch": 2.001819836214741,
180
+ "grad_norm": 1.350705623626709,
181
+ "learning_rate": 4.792804828461141e-05,
182
+ "loss": 0.3421,
183
+ "step": 2200
184
+ },
185
+ {
186
+ "epoch": 2.092811646951774,
187
+ "grad_norm": 1.293790340423584,
188
+ "learning_rate": 4.705551873273427e-05,
189
+ "loss": 0.3002,
190
+ "step": 2300
191
+ },
192
+ {
193
+ "epoch": 2.183803457688808,
194
+ "grad_norm": 1.174820065498352,
195
+ "learning_rate": 4.618298918085712e-05,
196
+ "loss": 0.3017,
197
+ "step": 2400
198
+ },
199
+ {
200
+ "epoch": 2.2747952684258417,
201
+ "grad_norm": 1.1537854671478271,
202
+ "learning_rate": 4.5310459628979975e-05,
203
+ "loss": 0.2996,
204
+ "step": 2500
205
+ },
206
+ {
207
+ "epoch": 2.3657870791628755,
208
+ "grad_norm": 1.1924264430999756,
209
+ "learning_rate": 4.443793007710283e-05,
210
+ "loss": 0.2893,
211
+ "step": 2600
212
+ },
213
+ {
214
+ "epoch": 2.4567788898999092,
215
+ "grad_norm": 0.8446494936943054,
216
+ "learning_rate": 4.356540052522569e-05,
217
+ "loss": 0.2789,
218
+ "step": 2700
219
+ },
220
+ {
221
+ "epoch": 2.5477707006369426,
222
+ "grad_norm": 1.1881588697433472,
223
+ "learning_rate": 4.2692870973348555e-05,
224
+ "loss": 0.2776,
225
+ "step": 2800
226
+ },
227
+ {
228
+ "epoch": 2.6387625113739763,
229
+ "grad_norm": 1.0319584608078003,
230
+ "learning_rate": 4.182034142147141e-05,
231
+ "loss": 0.2645,
232
+ "step": 2900
233
+ },
234
+ {
235
+ "epoch": 2.72975432211101,
236
+ "grad_norm": 0.9115346670150757,
237
+ "learning_rate": 4.094781186959427e-05,
238
+ "loss": 0.2642,
239
+ "step": 3000
240
+ },
241
+ {
242
+ "epoch": 2.72975432211101,
243
+ "eval_cer": 0.18407109182694315,
244
+ "eval_loss": 0.2636227309703827,
245
+ "eval_runtime": 16.7759,
246
+ "eval_samples_per_second": 29.805,
247
+ "eval_steps_per_second": 0.954,
248
+ "eval_wer": 0.4248514010755732,
249
+ "step": 3000
250
+ },
251
+ {
252
+ "epoch": 2.8207461328480434,
253
+ "grad_norm": 1.4283329248428345,
254
+ "learning_rate": 4.007528231771713e-05,
255
+ "loss": 0.279,
256
+ "step": 3100
257
+ },
258
+ {
259
+ "epoch": 2.911737943585077,
260
+ "grad_norm": 0.9780188798904419,
261
+ "learning_rate": 3.9202752765839986e-05,
262
+ "loss": 0.2656,
263
+ "step": 3200
264
+ },
265
+ {
266
+ "epoch": 3.002729754322111,
267
+ "grad_norm": 0.9775941371917725,
268
+ "learning_rate": 3.8330223213962844e-05,
269
+ "loss": 0.2661,
270
+ "step": 3300
271
+ },
272
+ {
273
+ "epoch": 3.0937215650591448,
274
+ "grad_norm": 1.2078616619110107,
275
+ "learning_rate": 3.74576936620857e-05,
276
+ "loss": 0.2331,
277
+ "step": 3400
278
+ },
279
+ {
280
+ "epoch": 3.1847133757961785,
281
+ "grad_norm": 1.5175094604492188,
282
+ "learning_rate": 3.658516411020856e-05,
283
+ "loss": 0.2413,
284
+ "step": 3500
285
+ },
286
+ {
287
+ "epoch": 3.275705186533212,
288
+ "grad_norm": 1.2120680809020996,
289
+ "learning_rate": 3.571263455833142e-05,
290
+ "loss": 0.2272,
291
+ "step": 3600
292
+ },
293
+ {
294
+ "epoch": 3.3666969972702456,
295
+ "grad_norm": 1.4247957468032837,
296
+ "learning_rate": 3.484010500645428e-05,
297
+ "loss": 0.2309,
298
+ "step": 3700
299
+ },
300
+ {
301
+ "epoch": 3.4576888080072794,
302
+ "grad_norm": 0.9986005425453186,
303
+ "learning_rate": 3.396757545457714e-05,
304
+ "loss": 0.2364,
305
+ "step": 3800
306
+ },
307
+ {
308
+ "epoch": 3.548680618744313,
309
+ "grad_norm": 1.8682105541229248,
310
+ "learning_rate": 3.30950459027e-05,
311
+ "loss": 0.2366,
312
+ "step": 3900
313
+ },
314
+ {
315
+ "epoch": 3.6396724294813465,
316
+ "grad_norm": 1.6900557279586792,
317
+ "learning_rate": 3.2222516350822855e-05,
318
+ "loss": 0.2328,
319
+ "step": 4000
320
+ },
321
+ {
322
+ "epoch": 3.6396724294813465,
323
+ "eval_cer": 0.17885602236221787,
324
+ "eval_loss": 0.24313220381736755,
325
+ "eval_runtime": 16.6038,
326
+ "eval_samples_per_second": 30.114,
327
+ "eval_steps_per_second": 0.964,
328
+ "eval_wer": 0.3959807529012171,
329
+ "step": 4000
330
+ },
331
+ {
332
+ "epoch": 3.7306642402183803,
333
+ "grad_norm": 1.7356419563293457,
334
+ "learning_rate": 3.134998679894571e-05,
335
+ "loss": 0.2165,
336
+ "step": 4100
337
+ },
338
+ {
339
+ "epoch": 3.821656050955414,
340
+ "grad_norm": 1.163643479347229,
341
+ "learning_rate": 3.0477457247068567e-05,
342
+ "loss": 0.2151,
343
+ "step": 4200
344
+ },
345
+ {
346
+ "epoch": 3.912647861692448,
347
+ "grad_norm": 1.036080241203308,
348
+ "learning_rate": 2.9604927695191425e-05,
349
+ "loss": 0.218,
350
+ "step": 4300
351
+ },
352
+ {
353
+ "epoch": 4.003639672429482,
354
+ "grad_norm": 4.445271015167236,
355
+ "learning_rate": 2.8732398143314286e-05,
356
+ "loss": 0.2095,
357
+ "step": 4400
358
+ },
359
+ {
360
+ "epoch": 4.094631483166515,
361
+ "grad_norm": 1.1208213567733765,
362
+ "learning_rate": 2.7859868591437143e-05,
363
+ "loss": 0.2027,
364
+ "step": 4500
365
+ },
366
+ {
367
+ "epoch": 4.185623293903548,
368
+ "grad_norm": 1.2286118268966675,
369
+ "learning_rate": 2.6987339039559998e-05,
370
+ "loss": 0.2022,
371
+ "step": 4600
372
+ },
373
+ {
374
+ "epoch": 4.276615104640582,
375
+ "grad_norm": 0.8969898819923401,
376
+ "learning_rate": 2.6114809487682856e-05,
377
+ "loss": 0.2038,
378
+ "step": 4700
379
+ },
380
+ {
381
+ "epoch": 4.367606915377616,
382
+ "grad_norm": 0.9858496785163879,
383
+ "learning_rate": 2.5242279935805717e-05,
384
+ "loss": 0.1959,
385
+ "step": 4800
386
+ },
387
+ {
388
+ "epoch": 4.45859872611465,
389
+ "grad_norm": 0.8040638566017151,
390
+ "learning_rate": 2.4369750383928574e-05,
391
+ "loss": 0.2075,
392
+ "step": 4900
393
+ },
394
+ {
395
+ "epoch": 4.549590536851683,
396
+ "grad_norm": 1.1833239793777466,
397
+ "learning_rate": 2.3497220832051432e-05,
398
+ "loss": 0.1933,
399
+ "step": 5000
400
+ },
401
+ {
402
+ "epoch": 4.549590536851683,
403
+ "eval_cer": 0.17322374734031457,
404
+ "eval_loss": 0.22891011834144592,
405
+ "eval_runtime": 16.6972,
406
+ "eval_samples_per_second": 29.945,
407
+ "eval_steps_per_second": 0.958,
408
+ "eval_wer": 0.3772997452589867,
409
+ "step": 5000
410
+ },
411
+ {
412
+ "epoch": 4.640582347588717,
413
+ "grad_norm": 2.7539749145507812,
414
+ "learning_rate": 2.262469128017429e-05,
415
+ "loss": 0.2037,
416
+ "step": 5100
417
+ },
418
+ {
419
+ "epoch": 4.731574158325751,
420
+ "grad_norm": 1.6865957975387573,
421
+ "learning_rate": 2.1752161728297148e-05,
422
+ "loss": 0.1968,
423
+ "step": 5200
424
+ },
425
+ {
426
+ "epoch": 4.822565969062785,
427
+ "grad_norm": 0.922374427318573,
428
+ "learning_rate": 2.087963217642001e-05,
429
+ "loss": 0.1999,
430
+ "step": 5300
431
+ },
432
+ {
433
+ "epoch": 4.9135577797998184,
434
+ "grad_norm": 0.786503791809082,
435
+ "learning_rate": 2.0007102624542866e-05,
436
+ "loss": 0.1885,
437
+ "step": 5400
438
+ },
439
+ {
440
+ "epoch": 5.004549590536851,
441
+ "grad_norm": 0.8755506277084351,
442
+ "learning_rate": 1.913457307266572e-05,
443
+ "loss": 0.1916,
444
+ "step": 5500
445
+ },
446
+ {
447
+ "epoch": 5.095541401273885,
448
+ "grad_norm": 1.141218662261963,
449
+ "learning_rate": 1.826204352078858e-05,
450
+ "loss": 0.1806,
451
+ "step": 5600
452
+ },
453
+ {
454
+ "epoch": 5.186533212010919,
455
+ "grad_norm": 1.0689939260482788,
456
+ "learning_rate": 1.7389513968911436e-05,
457
+ "loss": 0.1849,
458
+ "step": 5700
459
+ },
460
+ {
461
+ "epoch": 5.277525022747953,
462
+ "grad_norm": 1.020820140838623,
463
+ "learning_rate": 1.6516984417034297e-05,
464
+ "loss": 0.1699,
465
+ "step": 5800
466
+ },
467
+ {
468
+ "epoch": 5.368516833484986,
469
+ "grad_norm": 1.7188619375228882,
470
+ "learning_rate": 1.5644454865157155e-05,
471
+ "loss": 0.1816,
472
+ "step": 5900
473
+ },
474
+ {
475
+ "epoch": 5.45950864422202,
476
+ "grad_norm": 1.033896565437317,
477
+ "learning_rate": 1.4771925313280013e-05,
478
+ "loss": 0.1783,
479
+ "step": 6000
480
+ },
481
+ {
482
+ "epoch": 5.45950864422202,
483
+ "eval_cer": 0.17109599899870667,
484
+ "eval_loss": 0.22998099029064178,
485
+ "eval_runtime": 16.5438,
486
+ "eval_samples_per_second": 30.223,
487
+ "eval_steps_per_second": 0.967,
488
+ "eval_wer": 0.37277101613359753,
489
+ "step": 6000
490
+ },
491
+ {
492
+ "epoch": 5.550500454959054,
493
+ "grad_norm": 2.6767489910125732,
494
+ "learning_rate": 1.3899395761402872e-05,
495
+ "loss": 0.1741,
496
+ "step": 6100
497
+ },
498
+ {
499
+ "epoch": 5.641492265696087,
500
+ "grad_norm": 1.5569095611572266,
501
+ "learning_rate": 1.3026866209525728e-05,
502
+ "loss": 0.1769,
503
+ "step": 6200
504
+ },
505
+ {
506
+ "epoch": 5.732484076433121,
507
+ "grad_norm": 0.6999034881591797,
508
+ "learning_rate": 1.2154336657648588e-05,
509
+ "loss": 0.176,
510
+ "step": 6300
511
+ },
512
+ {
513
+ "epoch": 5.823475887170154,
514
+ "grad_norm": 1.3141757249832153,
515
+ "learning_rate": 1.1281807105771445e-05,
516
+ "loss": 0.1763,
517
+ "step": 6400
518
+ },
519
+ {
520
+ "epoch": 5.914467697907188,
521
+ "grad_norm": 1.060334324836731,
522
+ "learning_rate": 1.0409277553894303e-05,
523
+ "loss": 0.1727,
524
+ "step": 6500
525
+ },
526
+ {
527
+ "epoch": 6.005459508644222,
528
+ "grad_norm": 1.0684072971343994,
529
+ "learning_rate": 9.53674800201716e-06,
530
+ "loss": 0.1696,
531
+ "step": 6600
532
+ },
533
+ {
534
+ "epoch": 6.096451319381256,
535
+ "grad_norm": 1.1349165439605713,
536
+ "learning_rate": 8.664218450140019e-06,
537
+ "loss": 0.1613,
538
+ "step": 6700
539
+ },
540
+ {
541
+ "epoch": 6.1874431301182895,
542
+ "grad_norm": 0.6644859313964844,
543
+ "learning_rate": 7.791688898262876e-06,
544
+ "loss": 0.1642,
545
+ "step": 6800
546
+ },
547
+ {
548
+ "epoch": 6.278434940855323,
549
+ "grad_norm": 0.9588508009910583,
550
+ "learning_rate": 6.919159346385735e-06,
551
+ "loss": 0.1679,
552
+ "step": 6900
553
+ },
554
+ {
555
+ "epoch": 6.369426751592357,
556
+ "grad_norm": 1.6633976697921753,
557
+ "learning_rate": 6.0466297945085934e-06,
558
+ "loss": 0.1617,
559
+ "step": 7000
560
+ },
561
+ {
562
+ "epoch": 6.369426751592357,
563
+ "eval_cer": 0.169969543994326,
564
+ "eval_loss": 0.2233332395553589,
565
+ "eval_runtime": 16.6328,
566
+ "eval_samples_per_second": 30.061,
567
+ "eval_steps_per_second": 0.962,
568
+ "eval_wer": 0.3637135578828191,
569
+ "step": 7000
570
+ },
571
+ {
572
+ "epoch": 6.460418562329391,
573
+ "grad_norm": 0.7526870369911194,
574
+ "learning_rate": 5.174100242631451e-06,
575
+ "loss": 0.1678,
576
+ "step": 7100
577
+ },
578
+ {
579
+ "epoch": 6.551410373066424,
580
+ "grad_norm": 1.6900984048843384,
581
+ "learning_rate": 4.30157069075431e-06,
582
+ "loss": 0.1653,
583
+ "step": 7200
584
+ },
585
+ {
586
+ "epoch": 6.6424021838034575,
587
+ "grad_norm": 0.7345483303070068,
588
+ "learning_rate": 3.429041138877168e-06,
589
+ "loss": 0.1662,
590
+ "step": 7300
591
+ },
592
+ {
593
+ "epoch": 6.733393994540491,
594
+ "grad_norm": 0.7114779949188232,
595
+ "learning_rate": 2.5565115870000256e-06,
596
+ "loss": 0.1579,
597
+ "step": 7400
598
+ },
599
+ {
600
+ "epoch": 6.824385805277525,
601
+ "grad_norm": 0.9554657936096191,
602
+ "learning_rate": 1.683982035122884e-06,
603
+ "loss": 0.1588,
604
+ "step": 7500
605
+ },
606
+ {
607
+ "epoch": 6.915377616014559,
608
+ "grad_norm": 0.8747773170471191,
609
+ "learning_rate": 8.114524832457419e-07,
610
+ "loss": 0.1651,
611
+ "step": 7600
612
+ },
613
+ {
614
+ "epoch": 7.0,
615
+ "step": 7693,
616
+ "total_flos": 5.670215301911177e+19,
617
+ "train_loss": 0.6279906528631082,
618
+ "train_runtime": 12613.1325,
619
+ "train_samples_per_second": 19.504,
620
+ "train_steps_per_second": 0.61
621
+ }
622
+ ],
623
+ "logging_steps": 100,
624
+ "max_steps": 7693,
625
+ "num_input_tokens_seen": 0,
626
+ "num_train_epochs": 7,
627
+ "save_steps": 1000,
628
+ "stateful_callbacks": {
629
+ "TrainerControl": {
630
+ "args": {
631
+ "should_epoch_stop": false,
632
+ "should_evaluate": false,
633
+ "should_log": false,
634
+ "should_save": true,
635
+ "should_training_stop": true
636
+ },
637
+ "attributes": {}
638
+ }
639
+ },
640
+ "total_flos": 5.670215301911177e+19,
641
+ "train_batch_size": 16,
642
+ "trial_name": null,
643
+ "trial_params": null
644
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d06d4c43237644fa8843b9159fa54e0af1206b1470713744f05cad000c04433
3
+ size 5432
vocab.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "+": 18,
3
+ "[PAD]": 37,
4
+ "[UNK]": 36,
5
+ "|": 34,
6
+ "а": 31,
7
+ "б": 29,
8
+ "в": 33,
9
+ "г": 6,
10
+ "д": 27,
11
+ "е": 10,
12
+ "ж": 5,
13
+ "з": 21,
14
+ "и": 35,
15
+ "й": 25,
16
+ "к": 30,
17
+ "л": 12,
18
+ "м": 3,
19
+ "н": 0,
20
+ "о": 9,
21
+ "п": 28,
22
+ "р": 14,
23
+ "с": 20,
24
+ "т": 11,
25
+ "у": 19,
26
+ "ф": 4,
27
+ "х": 2,
28
+ "ц": 17,
29
+ "ч": 7,
30
+ "ш": 13,
31
+ "щ": 26,
32
+ "ь": 24,
33
+ "ю": 22,
34
+ "я": 15,
35
+ "є": 8,
36
+ "і": 32,
37
+ "ї": 16,
38
+ "ґ": 23,
39
+ "’": 1
40
+ }