caoldr commited on
Commit
9eb06e6
·
1 Parent(s): b2d1eb5

adding missing files

Browse files
Files changed (43) hide show
  1. .gitattributes +2 -1
  2. .gitignore +1 -0
  3. README.md +88 -0
  4. added_tokens.json +109 -0
  5. all_results.json +12 -0
  6. eval_results.json +8 -0
  7. merges.txt +0 -0
  8. normalizer.json +1742 -0
  9. preprocessor_config.json +0 -0
  10. pytorch_model.bin +3 -0
  11. run.sh +48 -0
  12. run_speech_recognition_seq2seq_streaming.py +769 -0
  13. runs/Dec18_19-31-26_150-136-92-72/1671392859.803059/events.out.tfevents.1671392859.150-136-92-72.3522376.1 +3 -0
  14. runs/Dec18_19-31-26_150-136-92-72/events.out.tfevents.1671392859.150-136-92-72.3522376.0 +3 -0
  15. runs/Dec18_19-31-26_150-136-92-72/events.out.tfevents.1671450244.150-136-92-72.3522376.2 +3 -0
  16. special_tokens_map.json +133 -0
  17. tokenizer_config.json +36 -0
  18. train_results.json +7 -0
  19. trainer_state.json +2515 -0
  20. training_args.bin +3 -0
  21. vocab.json +0 -0
  22. ~/.cache/models--openai--whisper-medium/.no_exist/a0b3589e1034234495a1b696c28d4832cdaf8a32/generation_config.json +0 -0
  23. ~/.cache/models--openai--whisper-medium/.no_exist/a0b3589e1034234495a1b696c28d4832cdaf8a32/tokenizer.json +0 -0
  24. ~/.cache/models--openai--whisper-medium/blobs/0f3456460629e21d559c6daa23ab6ce3644e8271 +0 -0
  25. ~/.cache/models--openai--whisper-medium/blobs/3a00c89ee5e8ae0cb159a6ec838843fb2266fac6 +0 -0
  26. ~/.cache/models--openai--whisper-medium/blobs/47e9dd31523ecea227504afad3870da1cfe5ad81 +109 -0
  27. ~/.cache/models--openai--whisper-medium/blobs/5e6c8377adf6019428b34a1ad906fb43de71d387 +36 -0
  28. ~/.cache/models--openai--whisper-medium/blobs/9115b6806f75d5122486b0e1ae0279a0207199c2 +133 -0
  29. ~/.cache/models--openai--whisper-medium/blobs/96d734d68ad5d63c8f41d525f5769788432f6963f32dbe36feefaa33d736a962 +3 -0
  30. ~/.cache/models--openai--whisper-medium/blobs/c2048dfa9fd94a052e62e908d2c4dfb18534b4d2 +0 -0
  31. ~/.cache/models--openai--whisper-medium/blobs/dd6ae819ad738ac1a546e9f9282ef325c33b9ea0 +1742 -0
  32. ~/.cache/models--openai--whisper-medium/blobs/f84be5dbc1bfd09035c3fd3e01b777bc47f14a66 +142 -0
  33. ~/.cache/models--openai--whisper-medium/refs/main +1 -0
  34. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/added_tokens.json +1 -0
  35. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/config.json +1 -0
  36. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/merges.txt +1 -0
  37. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/normalizer.json +1 -0
  38. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/preprocessor_config.json +1 -0
  39. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/pytorch_model.bin +3 -0
  40. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/special_tokens_map.json +1 -0
  41. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/tokenizer_config.json +1 -0
  42. ~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/vocab.json +1 -0
  43. ~/.cache/tmp22vcmo7s +3 -0
.gitattributes CHANGED
@@ -25,7 +25,6 @@
25
  *.safetensors filter=lfs diff=lfs merge=lfs -text
26
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
  *.tflite filter=lfs diff=lfs merge=lfs -text
30
  *.tgz filter=lfs diff=lfs merge=lfs -text
31
  *.wasm filter=lfs diff=lfs merge=lfs -text
@@ -33,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
25
  *.safetensors filter=lfs diff=lfs merge=lfs -text
26
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *.tar.* filter=lfs diff=lfs merge=lfs -text
 
28
  *.tflite filter=lfs diff=lfs merge=lfs -text
29
  *.tgz filter=lfs diff=lfs merge=lfs -text
30
  *.wasm filter=lfs diff=lfs merge=lfs -text
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ ~/.cache/models--openai--whisper-medium/blobs/96d734d68ad5d63c8f41d525f5769788432f6963f32dbe36feefaa33d736a962 filter=lfs diff=lfs merge=lfs -text
36
+ ~/.cache/tmp22vcmo7s filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - da
4
+ license: apache-2.0
5
+ tags:
6
+ - generated_from_trainer
7
+ - hf-asr-leaderboard
8
+ - whisper-event
9
+ datasets:
10
+ - mozilla-foundation/common_voice_11_0
11
+ metrics:
12
+ - wer
13
+ model-index:
14
+ - name: Whisper Medium Danish (CV11 + FLEURS)
15
+ results:
16
+ - task:
17
+ name: Automatic Speech Recognition
18
+ type: automatic-speech-recognition
19
+ dataset:
20
+ name: mozilla-foundation/common_voice_11_0
21
+ type: mozilla-foundation/common_voice_11_0
22
+ config: da
23
+ split: test
24
+ metrics:
25
+ - name: Wer
26
+ type: wer
27
+ value: 13.708574434508153
28
+ ---
29
+
30
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
31
+ should probably proofread and complete it, then remove this comment. -->
32
+
33
+ # Whisper Medium Danish (CV11 + FLEURS)
34
+
35
+ This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the mozilla-foundation/common_voice_11_0,google/fleurs da,da_dk dataset.
36
+ It achieves the following results on the evaluation set:
37
+ - Loss: 0.5814
38
+ - Wer: 13.7086
39
+
40
+ ## Model description
41
+
42
+ More information needed
43
+
44
+ ## Intended uses & limitations
45
+
46
+ More information needed
47
+
48
+ ## Training and evaluation data
49
+
50
+ More information needed
51
+
52
+ ## Training procedure
53
+
54
+ ### Training hyperparameters
55
+
56
+ The following hyperparameters were used during training:
57
+ - learning_rate: 8e-06
58
+ - train_batch_size: 32
59
+ - eval_batch_size: 8
60
+ - seed: 42
61
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
62
+ - lr_scheduler_type: linear
63
+ - lr_scheduler_warmup_steps: 500
64
+ - training_steps: 10000
65
+ - mixed_precision_training: Native AMP
66
+
67
+ ### Training results
68
+
69
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
70
+ |:-------------:|:-----:|:-----:|:---------------:|:-------:|
71
+ | 0.0265 | 3.14 | 1000 | 0.3690 | 14.7607 |
72
+ | 0.0063 | 6.29 | 2000 | 0.4342 | 14.0926 |
73
+ | 0.0016 | 9.43 | 3000 | 0.4847 | 14.3609 |
74
+ | 0.002 | 12.58 | 4000 | 0.4919 | 14.1715 |
75
+ | 0.0013 | 15.72 | 5000 | 0.5114 | 14.2294 |
76
+ | 0.0014 | 18.87 | 6000 | 0.5197 | 13.9137 |
77
+ | 0.0003 | 22.01 | 7000 | 0.5422 | 14.1978 |
78
+ | 0.0001 | 25.16 | 8000 | 0.5659 | 13.8716 |
79
+ | 0.0001 | 28.3 | 9000 | 0.5772 | 13.7296 |
80
+ | 0.0001 | 31.45 | 10000 | 0.5814 | 13.7086 |
81
+
82
+
83
+ ### Framework versions
84
+
85
+ - Transformers 4.26.0.dev0
86
+ - Pytorch 1.13.1+cu117
87
+ - Datasets 2.7.1.dev0
88
+ - Tokenizers 0.13.2
added_tokens.json ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "<|af|>": 50327,
3
+ "<|am|>": 50334,
4
+ "<|ar|>": 50272,
5
+ "<|as|>": 50350,
6
+ "<|az|>": 50304,
7
+ "<|ba|>": 50355,
8
+ "<|be|>": 50330,
9
+ "<|bg|>": 50292,
10
+ "<|bn|>": 50302,
11
+ "<|bo|>": 50347,
12
+ "<|br|>": 50309,
13
+ "<|bs|>": 50315,
14
+ "<|ca|>": 50270,
15
+ "<|cs|>": 50283,
16
+ "<|cy|>": 50297,
17
+ "<|da|>": 50285,
18
+ "<|de|>": 50261,
19
+ "<|el|>": 50281,
20
+ "<|endoftext|>": 50257,
21
+ "<|en|>": 50259,
22
+ "<|es|>": 50262,
23
+ "<|et|>": 50307,
24
+ "<|eu|>": 50310,
25
+ "<|fa|>": 50300,
26
+ "<|fi|>": 50277,
27
+ "<|fo|>": 50338,
28
+ "<|fr|>": 50265,
29
+ "<|gl|>": 50319,
30
+ "<|gu|>": 50333,
31
+ "<|haw|>": 50352,
32
+ "<|ha|>": 50354,
33
+ "<|hi|>": 50276,
34
+ "<|hr|>": 50291,
35
+ "<|ht|>": 50339,
36
+ "<|hu|>": 50286,
37
+ "<|hy|>": 50312,
38
+ "<|id|>": 50275,
39
+ "<|is|>": 50311,
40
+ "<|it|>": 50274,
41
+ "<|iw|>": 50279,
42
+ "<|ja|>": 50266,
43
+ "<|jw|>": 50356,
44
+ "<|ka|>": 50329,
45
+ "<|kk|>": 50316,
46
+ "<|km|>": 50323,
47
+ "<|kn|>": 50306,
48
+ "<|ko|>": 50264,
49
+ "<|la|>": 50294,
50
+ "<|lb|>": 50345,
51
+ "<|ln|>": 50353,
52
+ "<|lo|>": 50336,
53
+ "<|lt|>": 50293,
54
+ "<|lv|>": 50301,
55
+ "<|mg|>": 50349,
56
+ "<|mi|>": 50295,
57
+ "<|mk|>": 50308,
58
+ "<|ml|>": 50296,
59
+ "<|mn|>": 50314,
60
+ "<|mr|>": 50320,
61
+ "<|ms|>": 50282,
62
+ "<|mt|>": 50343,
63
+ "<|my|>": 50346,
64
+ "<|ne|>": 50313,
65
+ "<|nl|>": 50271,
66
+ "<|nn|>": 50342,
67
+ "<|nocaptions|>": 50362,
68
+ "<|notimestamps|>": 50363,
69
+ "<|no|>": 50288,
70
+ "<|oc|>": 50328,
71
+ "<|pa|>": 50321,
72
+ "<|pl|>": 50269,
73
+ "<|ps|>": 50340,
74
+ "<|pt|>": 50267,
75
+ "<|ro|>": 50284,
76
+ "<|ru|>": 50263,
77
+ "<|sa|>": 50344,
78
+ "<|sd|>": 50332,
79
+ "<|si|>": 50322,
80
+ "<|sk|>": 50298,
81
+ "<|sl|>": 50305,
82
+ "<|sn|>": 50324,
83
+ "<|so|>": 50326,
84
+ "<|sq|>": 50317,
85
+ "<|sr|>": 50303,
86
+ "<|startoflm|>": 50360,
87
+ "<|startofprev|>": 50361,
88
+ "<|startoftranscript|>": 50258,
89
+ "<|su|>": 50357,
90
+ "<|sv|>": 50273,
91
+ "<|sw|>": 50318,
92
+ "<|ta|>": 50287,
93
+ "<|te|>": 50299,
94
+ "<|tg|>": 50331,
95
+ "<|th|>": 50289,
96
+ "<|tk|>": 50341,
97
+ "<|tl|>": 50348,
98
+ "<|transcribe|>": 50359,
99
+ "<|translate|>": 50358,
100
+ "<|tr|>": 50268,
101
+ "<|tt|>": 50351,
102
+ "<|uk|>": 50280,
103
+ "<|ur|>": 50290,
104
+ "<|uz|>": 50337,
105
+ "<|vi|>": 50278,
106
+ "<|yi|>": 50335,
107
+ "<|yo|>": 50325,
108
+ "<|zh|>": 50260
109
+ }
all_results.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 31.45,
3
+ "eval_loss": 0.5813759565353394,
4
+ "eval_runtime": 1248.5474,
5
+ "eval_samples_per_second": 1.708,
6
+ "eval_steps_per_second": 0.214,
7
+ "eval_wer": 13.708574434508153,
8
+ "train_loss": 0.0230206538159051,
9
+ "train_runtime": 56008.0991,
10
+ "train_samples_per_second": 5.713,
11
+ "train_steps_per_second": 0.179
12
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 31.45,
3
+ "eval_loss": 0.5813759565353394,
4
+ "eval_runtime": 1248.5474,
5
+ "eval_samples_per_second": 1.708,
6
+ "eval_steps_per_second": 0.214,
7
+ "eval_wer": 13.708574434508153
8
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
normalizer.json ADDED
@@ -0,0 +1,1742 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "accessorise": "accessorize",
3
+ "accessorised": "accessorized",
4
+ "accessorises": "accessorizes",
5
+ "accessorising": "accessorizing",
6
+ "acclimatisation": "acclimatization",
7
+ "acclimatise": "acclimatize",
8
+ "acclimatised": "acclimatized",
9
+ "acclimatises": "acclimatizes",
10
+ "acclimatising": "acclimatizing",
11
+ "accoutrements": "accouterments",
12
+ "aeon": "eon",
13
+ "aeons": "eons",
14
+ "aerogramme": "aerogram",
15
+ "aerogrammes": "aerograms",
16
+ "aeroplane": "airplane",
17
+ "aeroplanes": "airplanes",
18
+ "aesthete": "esthete",
19
+ "aesthetes": "esthetes",
20
+ "aesthetic": "esthetic",
21
+ "aesthetically": "esthetically",
22
+ "aesthetics": "esthetics",
23
+ "aetiology": "etiology",
24
+ "ageing": "aging",
25
+ "aggrandisement": "aggrandizement",
26
+ "agonise": "agonize",
27
+ "agonised": "agonized",
28
+ "agonises": "agonizes",
29
+ "agonising": "agonizing",
30
+ "agonisingly": "agonizingly",
31
+ "almanack": "almanac",
32
+ "almanacks": "almanacs",
33
+ "aluminium": "aluminum",
34
+ "amortisable": "amortizable",
35
+ "amortisation": "amortization",
36
+ "amortisations": "amortizations",
37
+ "amortise": "amortize",
38
+ "amortised": "amortized",
39
+ "amortises": "amortizes",
40
+ "amortising": "amortizing",
41
+ "amphitheatre": "amphitheater",
42
+ "amphitheatres": "amphitheaters",
43
+ "anaemia": "anemia",
44
+ "anaemic": "anemic",
45
+ "anaesthesia": "anesthesia",
46
+ "anaesthetic": "anesthetic",
47
+ "anaesthetics": "anesthetics",
48
+ "anaesthetise": "anesthetize",
49
+ "anaesthetised": "anesthetized",
50
+ "anaesthetises": "anesthetizes",
51
+ "anaesthetising": "anesthetizing",
52
+ "anaesthetist": "anesthetist",
53
+ "anaesthetists": "anesthetists",
54
+ "anaesthetize": "anesthetize",
55
+ "anaesthetized": "anesthetized",
56
+ "anaesthetizes": "anesthetizes",
57
+ "anaesthetizing": "anesthetizing",
58
+ "analogue": "analog",
59
+ "analogues": "analogs",
60
+ "analyse": "analyze",
61
+ "analysed": "analyzed",
62
+ "analyses": "analyzes",
63
+ "analysing": "analyzing",
64
+ "anglicise": "anglicize",
65
+ "anglicised": "anglicized",
66
+ "anglicises": "anglicizes",
67
+ "anglicising": "anglicizing",
68
+ "annualised": "annualized",
69
+ "antagonise": "antagonize",
70
+ "antagonised": "antagonized",
71
+ "antagonises": "antagonizes",
72
+ "antagonising": "antagonizing",
73
+ "apologise": "apologize",
74
+ "apologised": "apologized",
75
+ "apologises": "apologizes",
76
+ "apologising": "apologizing",
77
+ "appal": "appall",
78
+ "appals": "appalls",
79
+ "appetiser": "appetizer",
80
+ "appetisers": "appetizers",
81
+ "appetising": "appetizing",
82
+ "appetisingly": "appetizingly",
83
+ "arbour": "arbor",
84
+ "arbours": "arbors",
85
+ "archaeologically": "archeologically",
86
+ "archaeologist": "archeologist",
87
+ "archaeologists": "archeologists",
88
+ "archaeology": "archeology",
89
+ "archeological": "archaeological",
90
+ "ardour": "ardor",
91
+ "armour": "armor",
92
+ "armoured": "armored",
93
+ "armourer": "armorer",
94
+ "armourers": "armorers",
95
+ "armouries": "armories",
96
+ "armoury": "armory",
97
+ "artefact": "artifact",
98
+ "artefacts": "artifacts",
99
+ "authorise": "authorize",
100
+ "authorised": "authorized",
101
+ "authorises": "authorizes",
102
+ "authorising": "authorizing",
103
+ "axe": "ax",
104
+ "backpedalled": "backpedaled",
105
+ "backpedalling": "backpedaling",
106
+ "bannister": "banister",
107
+ "bannisters": "banisters",
108
+ "baptise": "baptize",
109
+ "baptised": "baptized",
110
+ "baptises": "baptizes",
111
+ "baptising": "baptizing",
112
+ "bastardise": "bastardize",
113
+ "bastardised": "bastardized",
114
+ "bastardises": "bastardizes",
115
+ "bastardising": "bastardizing",
116
+ "battleax": "battleaxe",
117
+ "baulk": "balk",
118
+ "baulked": "balked",
119
+ "baulking": "balking",
120
+ "baulks": "balks",
121
+ "bedevilled": "bedeviled",
122
+ "bedevilling": "bedeviling",
123
+ "behaviour": "behavior",
124
+ "behavioural": "behavioral",
125
+ "behaviourism": "behaviorism",
126
+ "behaviourist": "behaviorist",
127
+ "behaviourists": "behaviorists",
128
+ "behaviours": "behaviors",
129
+ "behove": "behoove",
130
+ "behoved": "behooved",
131
+ "behoves": "behooves",
132
+ "bejewelled": "bejeweled",
133
+ "belabour": "belabor",
134
+ "belaboured": "belabored",
135
+ "belabouring": "belaboring",
136
+ "belabours": "belabors",
137
+ "bevelled": "beveled",
138
+ "bevvies": "bevies",
139
+ "bevvy": "bevy",
140
+ "biassed": "biased",
141
+ "biassing": "biasing",
142
+ "bingeing": "binging",
143
+ "bougainvillaea": "bougainvillea",
144
+ "bougainvillaeas": "bougainvilleas",
145
+ "bowdlerise": "bowdlerize",
146
+ "bowdlerised": "bowdlerized",
147
+ "bowdlerises": "bowdlerizes",
148
+ "bowdlerising": "bowdlerizing",
149
+ "breathalyse": "breathalyze",
150
+ "breathalysed": "breathalyzed",
151
+ "breathalyser": "breathalyzer",
152
+ "breathalysers": "breathalyzers",
153
+ "breathalyses": "breathalyzes",
154
+ "breathalysing": "breathalyzing",
155
+ "brutalise": "brutalize",
156
+ "brutalised": "brutalized",
157
+ "brutalises": "brutalizes",
158
+ "brutalising": "brutalizing",
159
+ "busses": "buses",
160
+ "bussing": "busing",
161
+ "caesarean": "cesarean",
162
+ "caesareans": "cesareans",
163
+ "calibre": "caliber",
164
+ "calibres": "calibers",
165
+ "calliper": "caliper",
166
+ "callipers": "calipers",
167
+ "callisthenics": "calisthenics",
168
+ "canalise": "canalize",
169
+ "canalised": "canalized",
170
+ "canalises": "canalizes",
171
+ "canalising": "canalizing",
172
+ "cancelation": "cancellation",
173
+ "cancelations": "cancellations",
174
+ "cancelled": "canceled",
175
+ "cancelling": "canceling",
176
+ "candour": "candor",
177
+ "cannibalise": "cannibalize",
178
+ "cannibalised": "cannibalized",
179
+ "cannibalises": "cannibalizes",
180
+ "cannibalising": "cannibalizing",
181
+ "canonise": "canonize",
182
+ "canonised": "canonized",
183
+ "canonises": "canonizes",
184
+ "canonising": "canonizing",
185
+ "capitalise": "capitalize",
186
+ "capitalised": "capitalized",
187
+ "capitalises": "capitalizes",
188
+ "capitalising": "capitalizing",
189
+ "caramelise": "caramelize",
190
+ "caramelised": "caramelized",
191
+ "caramelises": "caramelizes",
192
+ "caramelising": "caramelizing",
193
+ "carbonise": "carbonize",
194
+ "carbonised": "carbonized",
195
+ "carbonises": "carbonizes",
196
+ "carbonising": "carbonizing",
197
+ "carolled": "caroled",
198
+ "carolling": "caroling",
199
+ "catalogue": "catalog",
200
+ "catalogued": "cataloged",
201
+ "catalogues": "catalogs",
202
+ "cataloguing": "cataloging",
203
+ "catalyse": "catalyze",
204
+ "catalysed": "catalyzed",
205
+ "catalyses": "catalyzes",
206
+ "catalysing": "catalyzing",
207
+ "categorise": "categorize",
208
+ "categorised": "categorized",
209
+ "categorises": "categorizes",
210
+ "categorising": "categorizing",
211
+ "cauterise": "cauterize",
212
+ "cauterised": "cauterized",
213
+ "cauterises": "cauterizes",
214
+ "cauterising": "cauterizing",
215
+ "cavilled": "caviled",
216
+ "cavilling": "caviling",
217
+ "centigramme": "centigram",
218
+ "centigrammes": "centigrams",
219
+ "centilitre": "centiliter",
220
+ "centilitres": "centiliters",
221
+ "centimetre": "centimeter",
222
+ "centimetres": "centimeters",
223
+ "centralise": "centralize",
224
+ "centralised": "centralized",
225
+ "centralises": "centralizes",
226
+ "centralising": "centralizing",
227
+ "centre": "center",
228
+ "centred": "centered",
229
+ "centrefold": "centerfold",
230
+ "centrefolds": "centerfolds",
231
+ "centrepiece": "centerpiece",
232
+ "centrepieces": "centerpieces",
233
+ "centres": "centers",
234
+ "channelled": "channeled",
235
+ "channelling": "channeling",
236
+ "characterise": "characterize",
237
+ "characterised": "characterized",
238
+ "characterises": "characterizes",
239
+ "characterising": "characterizing",
240
+ "cheque": "check",
241
+ "chequebook": "checkbook",
242
+ "chequebooks": "checkbooks",
243
+ "chequered": "checkered",
244
+ "cheques": "checks",
245
+ "chilli": "chili",
246
+ "chimaera": "chimera",
247
+ "chimaeras": "chimeras",
248
+ "chiselled": "chiseled",
249
+ "chiselling": "chiseling",
250
+ "circularise": "circularize",
251
+ "circularised": "circularized",
252
+ "circularises": "circularizes",
253
+ "circularising": "circularizing",
254
+ "civilise": "civilize",
255
+ "civilised": "civilized",
256
+ "civilises": "civilizes",
257
+ "civilising": "civilizing",
258
+ "clamour": "clamor",
259
+ "clamoured": "clamored",
260
+ "clamouring": "clamoring",
261
+ "clamours": "clamors",
262
+ "clangour": "clangor",
263
+ "clarinettist": "clarinetist",
264
+ "clarinettists": "clarinetists",
265
+ "collectivise": "collectivize",
266
+ "collectivised": "collectivized",
267
+ "collectivises": "collectivizes",
268
+ "collectivising": "collectivizing",
269
+ "colonisation": "colonization",
270
+ "colonise": "colonize",
271
+ "colonised": "colonized",
272
+ "coloniser": "colonizer",
273
+ "colonisers": "colonizers",
274
+ "colonises": "colonizes",
275
+ "colonising": "colonizing",
276
+ "colour": "color",
277
+ "colourant": "colorant",
278
+ "colourants": "colorants",
279
+ "coloured": "colored",
280
+ "coloureds": "coloreds",
281
+ "colourful": "colorful",
282
+ "colourfully": "colorfully",
283
+ "colouring": "coloring",
284
+ "colourize": "colorize",
285
+ "colourized": "colorized",
286
+ "colourizes": "colorizes",
287
+ "colourizing": "colorizing",
288
+ "colourless": "colorless",
289
+ "colours": "colors",
290
+ "commercialise": "commercialize",
291
+ "commercialised": "commercialized",
292
+ "commercialises": "commercializes",
293
+ "commercialising": "commercializing",
294
+ "compartmentalise": "compartmentalize",
295
+ "compartmentalised": "compartmentalized",
296
+ "compartmentalises": "compartmentalizes",
297
+ "compartmentalising": "compartmentalizing",
298
+ "computerise": "computerize",
299
+ "computerised": "computerized",
300
+ "computerises": "computerizes",
301
+ "computerising": "computerizing",
302
+ "conceptualise": "conceptualize",
303
+ "conceptualised": "conceptualized",
304
+ "conceptualises": "conceptualizes",
305
+ "conceptualising": "conceptualizing",
306
+ "connexion": "connection",
307
+ "connexions": "connections",
308
+ "contextualise": "contextualize",
309
+ "contextualised": "contextualized",
310
+ "contextualises": "contextualizes",
311
+ "contextualising": "contextualizing",
312
+ "cosier": "cozier",
313
+ "cosies": "cozies",
314
+ "cosiest": "coziest",
315
+ "cosily": "cozily",
316
+ "cosiness": "coziness",
317
+ "cosy": "cozy",
318
+ "councillor": "councilor",
319
+ "councillors": "councilors",
320
+ "counselled": "counseled",
321
+ "counselling": "counseling",
322
+ "counsellor": "counselor",
323
+ "counsellors": "counselors",
324
+ "crenelated": "crenellated",
325
+ "criminalise": "criminalize",
326
+ "criminalised": "criminalized",
327
+ "criminalises": "criminalizes",
328
+ "criminalising": "criminalizing",
329
+ "criticise": "criticize",
330
+ "criticised": "criticized",
331
+ "criticises": "criticizes",
332
+ "criticising": "criticizing",
333
+ "crueller": "crueler",
334
+ "cruellest": "cruelest",
335
+ "crystallisation": "crystallization",
336
+ "crystallise": "crystallize",
337
+ "crystallised": "crystallized",
338
+ "crystallises": "crystallizes",
339
+ "crystallising": "crystallizing",
340
+ "cudgelled": "cudgeled",
341
+ "cudgelling": "cudgeling",
342
+ "customise": "customize",
343
+ "customised": "customized",
344
+ "customises": "customizes",
345
+ "customising": "customizing",
346
+ "cypher": "cipher",
347
+ "cyphers": "ciphers",
348
+ "decentralisation": "decentralization",
349
+ "decentralise": "decentralize",
350
+ "decentralised": "decentralized",
351
+ "decentralises": "decentralizes",
352
+ "decentralising": "decentralizing",
353
+ "decriminalisation": "decriminalization",
354
+ "decriminalise": "decriminalize",
355
+ "decriminalised": "decriminalized",
356
+ "decriminalises": "decriminalizes",
357
+ "decriminalising": "decriminalizing",
358
+ "defence": "defense",
359
+ "defenceless": "defenseless",
360
+ "defences": "defenses",
361
+ "dehumanisation": "dehumanization",
362
+ "dehumanise": "dehumanize",
363
+ "dehumanised": "dehumanized",
364
+ "dehumanises": "dehumanizes",
365
+ "dehumanising": "dehumanizing",
366
+ "demeanour": "demeanor",
367
+ "demilitarisation": "demilitarization",
368
+ "demilitarise": "demilitarize",
369
+ "demilitarised": "demilitarized",
370
+ "demilitarises": "demilitarizes",
371
+ "demilitarising": "demilitarizing",
372
+ "demobilisation": "demobilization",
373
+ "demobilise": "demobilize",
374
+ "demobilised": "demobilized",
375
+ "demobilises": "demobilizes",
376
+ "demobilising": "demobilizing",
377
+ "democratisation": "democratization",
378
+ "democratise": "democratize",
379
+ "democratised": "democratized",
380
+ "democratises": "democratizes",
381
+ "democratising": "democratizing",
382
+ "demonise": "demonize",
383
+ "demonised": "demonized",
384
+ "demonises": "demonizes",
385
+ "demonising": "demonizing",
386
+ "demoralisation": "demoralization",
387
+ "demoralise": "demoralize",
388
+ "demoralised": "demoralized",
389
+ "demoralises": "demoralizes",
390
+ "demoralising": "demoralizing",
391
+ "denationalisation": "denationalization",
392
+ "denationalise": "denationalize",
393
+ "denationalised": "denationalized",
394
+ "denationalises": "denationalizes",
395
+ "denationalising": "denationalizing",
396
+ "deodorise": "deodorize",
397
+ "deodorised": "deodorized",
398
+ "deodorises": "deodorizes",
399
+ "deodorising": "deodorizing",
400
+ "depersonalise": "depersonalize",
401
+ "depersonalised": "depersonalized",
402
+ "depersonalises": "depersonalizes",
403
+ "depersonalising": "depersonalizing",
404
+ "deputise": "deputize",
405
+ "deputised": "deputized",
406
+ "deputises": "deputizes",
407
+ "deputising": "deputizing",
408
+ "desensitisation": "desensitization",
409
+ "desensitise": "desensitize",
410
+ "desensitised": "desensitized",
411
+ "desensitises": "desensitizes",
412
+ "desensitising": "desensitizing",
413
+ "destabilisation": "destabilization",
414
+ "destabilise": "destabilize",
415
+ "destabilised": "destabilized",
416
+ "destabilises": "destabilizes",
417
+ "destabilising": "destabilizing",
418
+ "dialled": "dialed",
419
+ "dialling": "dialing",
420
+ "dialogue": "dialog",
421
+ "dialogues": "dialogs",
422
+ "diarrhoea": "diarrhea",
423
+ "digitise": "digitize",
424
+ "digitised": "digitized",
425
+ "digitises": "digitizes",
426
+ "digitising": "digitizing",
427
+ "disc": "disk",
428
+ "discolour": "discolor",
429
+ "discoloured": "discolored",
430
+ "discolouring": "discoloring",
431
+ "discolours": "discolors",
432
+ "discs": "disks",
433
+ "disembowelled": "disemboweled",
434
+ "disembowelling": "disemboweling",
435
+ "disfavour": "disfavor",
436
+ "dishevelled": "disheveled",
437
+ "dishonour": "dishonor",
438
+ "dishonourable": "dishonorable",
439
+ "dishonourably": "dishonorably",
440
+ "dishonoured": "dishonored",
441
+ "dishonouring": "dishonoring",
442
+ "dishonours": "dishonors",
443
+ "disorganisation": "disorganization",
444
+ "disorganised": "disorganized",
445
+ "distil": "distill",
446
+ "distils": "distills",
447
+ "dramatisation": "dramatization",
448
+ "dramatisations": "dramatizations",
449
+ "dramatise": "dramatize",
450
+ "dramatised": "dramatized",
451
+ "dramatises": "dramatizes",
452
+ "dramatising": "dramatizing",
453
+ "draught": "draft",
454
+ "draughtboard": "draftboard",
455
+ "draughtboards": "draftboards",
456
+ "draughtier": "draftier",
457
+ "draughtiest": "draftiest",
458
+ "draughts": "drafts",
459
+ "draughtsman": "draftsman",
460
+ "draughtsmanship": "draftsmanship",
461
+ "draughtsmen": "draftsmen",
462
+ "draughtswoman": "draftswoman",
463
+ "draughtswomen": "draftswomen",
464
+ "draughty": "drafty",
465
+ "drivelled": "driveled",
466
+ "drivelling": "driveling",
467
+ "duelled": "dueled",
468
+ "duelling": "dueling",
469
+ "economise": "economize",
470
+ "economised": "economized",
471
+ "economises": "economizes",
472
+ "economising": "economizing",
473
+ "editorialise": "editorialize",
474
+ "editorialised": "editorialized",
475
+ "editorialises": "editorializes",
476
+ "editorialising": "editorializing",
477
+ "edoema": "edema",
478
+ "empathise": "empathize",
479
+ "empathised": "empathized",
480
+ "empathises": "empathizes",
481
+ "empathising": "empathizing",
482
+ "emphasise": "emphasize",
483
+ "emphasised": "emphasized",
484
+ "emphasises": "emphasizes",
485
+ "emphasising": "emphasizing",
486
+ "enamelled": "enameled",
487
+ "enamelling": "enameling",
488
+ "enamoured": "enamored",
489
+ "encyclopaedia": "encyclopedia",
490
+ "encyclopaedias": "encyclopedias",
491
+ "encyclopaedic": "encyclopedic",
492
+ "endeavour": "endeavor",
493
+ "endeavoured": "endeavored",
494
+ "endeavouring": "endeavoring",
495
+ "endeavours": "endeavors",
496
+ "energise": "energize",
497
+ "energised": "energized",
498
+ "energises": "energizes",
499
+ "energising": "energizing",
500
+ "enrol": "enroll",
501
+ "enrols": "enrolls",
502
+ "enthral": "enthrall",
503
+ "enthrals": "enthralls",
504
+ "epaulette": "epaulet",
505
+ "epaulettes": "epaulets",
506
+ "epicentre": "epicenter",
507
+ "epicentres": "epicenters",
508
+ "epilogue": "epilog",
509
+ "epilogues": "epilogs",
510
+ "epitomise": "epitomize",
511
+ "epitomised": "epitomized",
512
+ "epitomises": "epitomizes",
513
+ "epitomising": "epitomizing",
514
+ "equalisation": "equalization",
515
+ "equalise": "equalize",
516
+ "equalised": "equalized",
517
+ "equaliser": "equalizer",
518
+ "equalisers": "equalizers",
519
+ "equalises": "equalizes",
520
+ "equalising": "equalizing",
521
+ "eulogise": "eulogize",
522
+ "eulogised": "eulogized",
523
+ "eulogises": "eulogizes",
524
+ "eulogising": "eulogizing",
525
+ "evangelise": "evangelize",
526
+ "evangelised": "evangelized",
527
+ "evangelises": "evangelizes",
528
+ "evangelising": "evangelizing",
529
+ "exorcise": "exorcize",
530
+ "exorcised": "exorcized",
531
+ "exorcises": "exorcizes",
532
+ "exorcising": "exorcizing",
533
+ "extemporisation": "extemporization",
534
+ "extemporise": "extemporize",
535
+ "extemporised": "extemporized",
536
+ "extemporises": "extemporizes",
537
+ "extemporising": "extemporizing",
538
+ "externalisation": "externalization",
539
+ "externalisations": "externalizations",
540
+ "externalise": "externalize",
541
+ "externalised": "externalized",
542
+ "externalises": "externalizes",
543
+ "externalising": "externalizing",
544
+ "factorise": "factorize",
545
+ "factorised": "factorized",
546
+ "factorises": "factorizes",
547
+ "factorising": "factorizing",
548
+ "faecal": "fecal",
549
+ "faeces": "feces",
550
+ "familiarisation": "familiarization",
551
+ "familiarise": "familiarize",
552
+ "familiarised": "familiarized",
553
+ "familiarises": "familiarizes",
554
+ "familiarising": "familiarizing",
555
+ "fantasise": "fantasize",
556
+ "fantasised": "fantasized",
557
+ "fantasises": "fantasizes",
558
+ "fantasising": "fantasizing",
559
+ "favour": "favor",
560
+ "favourable": "favorable",
561
+ "favourably": "favorably",
562
+ "favoured": "favored",
563
+ "favouring": "favoring",
564
+ "favourite": "favorite",
565
+ "favourites": "favorites",
566
+ "favouritism": "favoritism",
567
+ "favours": "favors",
568
+ "feminise": "feminize",
569
+ "feminised": "feminized",
570
+ "feminises": "feminizes",
571
+ "feminising": "feminizing",
572
+ "fertilisation": "fertilization",
573
+ "fertilise": "fertilize",
574
+ "fertilised": "fertilized",
575
+ "fertiliser": "fertilizer",
576
+ "fertilisers": "fertilizers",
577
+ "fertilises": "fertilizes",
578
+ "fertilising": "fertilizing",
579
+ "fervour": "fervor",
580
+ "fibre": "fiber",
581
+ "fibreglass": "fiberglass",
582
+ "fibres": "fibers",
583
+ "fictionalisation": "fictionalization",
584
+ "fictionalisations": "fictionalizations",
585
+ "fictionalise": "fictionalize",
586
+ "fictionalised": "fictionalized",
587
+ "fictionalises": "fictionalizes",
588
+ "fictionalising": "fictionalizing",
589
+ "fillet": "filet",
590
+ "filleted": "fileted",
591
+ "filleting": "fileting",
592
+ "fillets": "filets",
593
+ "finalisation": "finalization",
594
+ "finalise": "finalize",
595
+ "finalised": "finalized",
596
+ "finalises": "finalizes",
597
+ "finalising": "finalizing",
598
+ "flautist": "flutist",
599
+ "flautists": "flutists",
600
+ "flavour": "flavor",
601
+ "flavoured": "flavored",
602
+ "flavouring": "flavoring",
603
+ "flavourings": "flavorings",
604
+ "flavourless": "flavorless",
605
+ "flavours": "flavors",
606
+ "flavoursome": "flavorsome",
607
+ "flyer / flier": "flier / flyer",
608
+ "foetal": "fetal",
609
+ "foetid": "fetid",
610
+ "foetus": "fetus",
611
+ "foetuses": "fetuses",
612
+ "formalisation": "formalization",
613
+ "formalise": "formalize",
614
+ "formalised": "formalized",
615
+ "formalises": "formalizes",
616
+ "formalising": "formalizing",
617
+ "fossilisation": "fossilization",
618
+ "fossilise": "fossilize",
619
+ "fossilised": "fossilized",
620
+ "fossilises": "fossilizes",
621
+ "fossilising": "fossilizing",
622
+ "fraternisation": "fraternization",
623
+ "fraternise": "fraternize",
624
+ "fraternised": "fraternized",
625
+ "fraternises": "fraternizes",
626
+ "fraternising": "fraternizing",
627
+ "fulfil": "fulfill",
628
+ "fulfilment": "fulfillment",
629
+ "fulfils": "fulfills",
630
+ "funnelled": "funneled",
631
+ "funnelling": "funneling",
632
+ "gage": "gauge",
633
+ "gaged": "gauged",
634
+ "gages": "gauges",
635
+ "gaging": "gauging",
636
+ "galvanise": "galvanize",
637
+ "galvanised": "galvanized",
638
+ "galvanises": "galvanizes",
639
+ "galvanising": "galvanizing",
640
+ "gambolled": "gamboled",
641
+ "gambolling": "gamboling",
642
+ "gaol": "jail",
643
+ "gaolbird": "jailbird",
644
+ "gaolbirds": "jailbirds",
645
+ "gaolbreak": "jailbreak",
646
+ "gaolbreaks": "jailbreaks",
647
+ "gaoled": "jailed",
648
+ "gaoler": "jailer",
649
+ "gaolers": "jailers",
650
+ "gaoling": "jailing",
651
+ "gaols": "jails",
652
+ "gasses": "gases",
653
+ "generalisation": "generalization",
654
+ "generalisations": "generalizations",
655
+ "generalise": "generalize",
656
+ "generalised": "generalized",
657
+ "generalises": "generalizes",
658
+ "generalising": "generalizing",
659
+ "ghettoise": "ghettoize",
660
+ "ghettoised": "ghettoized",
661
+ "ghettoises": "ghettoizes",
662
+ "ghettoising": "ghettoizing",
663
+ "gipsies": "gypsies",
664
+ "glamor": "glamour",
665
+ "glamorise": "glamorize",
666
+ "glamorised": "glamorized",
667
+ "glamorises": "glamorizes",
668
+ "glamorising": "glamorizing",
669
+ "globalisation": "globalization",
670
+ "globalise": "globalize",
671
+ "globalised": "globalized",
672
+ "globalises": "globalizes",
673
+ "globalising": "globalizing",
674
+ "glueing": "gluing",
675
+ "goitre": "goiter",
676
+ "goitres": "goiters",
677
+ "gonorrhoea": "gonorrhea",
678
+ "gramme": "gram",
679
+ "grammes": "grams",
680
+ "gravelled": "graveled",
681
+ "grey": "gray",
682
+ "greyed": "grayed",
683
+ "greying": "graying",
684
+ "greyish": "grayish",
685
+ "greyness": "grayness",
686
+ "greys": "grays",
687
+ "grovelled": "groveled",
688
+ "grovelling": "groveling",
689
+ "groyne": "groin",
690
+ "groynes": "groins",
691
+ "gruelling": "grueling",
692
+ "gruellingly": "gruelingly",
693
+ "gryphon": "griffin",
694
+ "gryphons": "griffins",
695
+ "gynaecological": "gynecological",
696
+ "gynaecologist": "gynecologist",
697
+ "gynaecologists": "gynecologists",
698
+ "gynaecology": "gynecology",
699
+ "haematological": "hematological",
700
+ "haematologist": "hematologist",
701
+ "haematologists": "hematologists",
702
+ "haematology": "hematology",
703
+ "haemoglobin": "hemoglobin",
704
+ "haemophilia": "hemophilia",
705
+ "haemophiliac": "hemophiliac",
706
+ "haemophiliacs": "hemophiliacs",
707
+ "haemorrhage": "hemorrhage",
708
+ "haemorrhaged": "hemorrhaged",
709
+ "haemorrhages": "hemorrhages",
710
+ "haemorrhaging": "hemorrhaging",
711
+ "haemorrhoids": "hemorrhoids",
712
+ "harbour": "harbor",
713
+ "harboured": "harbored",
714
+ "harbouring": "harboring",
715
+ "harbours": "harbors",
716
+ "harmonisation": "harmonization",
717
+ "harmonise": "harmonize",
718
+ "harmonised": "harmonized",
719
+ "harmonises": "harmonizes",
720
+ "harmonising": "harmonizing",
721
+ "homoeopath": "homeopath",
722
+ "homoeopathic": "homeopathic",
723
+ "homoeopaths": "homeopaths",
724
+ "homoeopathy": "homeopathy",
725
+ "homogenise": "homogenize",
726
+ "homogenised": "homogenized",
727
+ "homogenises": "homogenizes",
728
+ "homogenising": "homogenizing",
729
+ "honour": "honor",
730
+ "honourable": "honorable",
731
+ "honourably": "honorably",
732
+ "honoured": "honored",
733
+ "honouring": "honoring",
734
+ "honours": "honors",
735
+ "hospitalisation": "hospitalization",
736
+ "hospitalise": "hospitalize",
737
+ "hospitalised": "hospitalized",
738
+ "hospitalises": "hospitalizes",
739
+ "hospitalising": "hospitalizing",
740
+ "humanise": "humanize",
741
+ "humanised": "humanized",
742
+ "humanises": "humanizes",
743
+ "humanising": "humanizing",
744
+ "humour": "humor",
745
+ "humoured": "humored",
746
+ "humouring": "humoring",
747
+ "humourless": "humorless",
748
+ "humours": "humors",
749
+ "hybridise": "hybridize",
750
+ "hybridised": "hybridized",
751
+ "hybridises": "hybridizes",
752
+ "hybridising": "hybridizing",
753
+ "hypnotise": "hypnotize",
754
+ "hypnotised": "hypnotized",
755
+ "hypnotises": "hypnotizes",
756
+ "hypnotising": "hypnotizing",
757
+ "hypothesise": "hypothesize",
758
+ "hypothesised": "hypothesized",
759
+ "hypothesises": "hypothesizes",
760
+ "hypothesising": "hypothesizing",
761
+ "idealisation": "idealization",
762
+ "idealise": "idealize",
763
+ "idealised": "idealized",
764
+ "idealises": "idealizes",
765
+ "idealising": "idealizing",
766
+ "idolise": "idolize",
767
+ "idolised": "idolized",
768
+ "idolises": "idolizes",
769
+ "idolising": "idolizing",
770
+ "immobilisation": "immobilization",
771
+ "immobilise": "immobilize",
772
+ "immobilised": "immobilized",
773
+ "immobiliser": "immobilizer",
774
+ "immobilisers": "immobilizers",
775
+ "immobilises": "immobilizes",
776
+ "immobilising": "immobilizing",
777
+ "immortalise": "immortalize",
778
+ "immortalised": "immortalized",
779
+ "immortalises": "immortalizes",
780
+ "immortalising": "immortalizing",
781
+ "immunisation": "immunization",
782
+ "immunise": "immunize",
783
+ "immunised": "immunized",
784
+ "immunises": "immunizes",
785
+ "immunising": "immunizing",
786
+ "impanelled": "impaneled",
787
+ "impanelling": "impaneling",
788
+ "imperilled": "imperiled",
789
+ "imperilling": "imperiling",
790
+ "individualise": "individualize",
791
+ "individualised": "individualized",
792
+ "individualises": "individualizes",
793
+ "individualising": "individualizing",
794
+ "industrialise": "industrialize",
795
+ "industrialised": "industrialized",
796
+ "industrialises": "industrializes",
797
+ "industrialising": "industrializing",
798
+ "inflexion": "inflection",
799
+ "inflexions": "inflections",
800
+ "initialise": "initialize",
801
+ "initialised": "initialized",
802
+ "initialises": "initializes",
803
+ "initialising": "initializing",
804
+ "initialled": "initialed",
805
+ "initialling": "initialing",
806
+ "instal": "install",
807
+ "instalment": "installment",
808
+ "instalments": "installments",
809
+ "instals": "installs",
810
+ "instil": "instill",
811
+ "instils": "instills",
812
+ "institutionalisation": "institutionalization",
813
+ "institutionalise": "institutionalize",
814
+ "institutionalised": "institutionalized",
815
+ "institutionalises": "institutionalizes",
816
+ "institutionalising": "institutionalizing",
817
+ "intellectualise": "intellectualize",
818
+ "intellectualised": "intellectualized",
819
+ "intellectualises": "intellectualizes",
820
+ "intellectualising": "intellectualizing",
821
+ "internalisation": "internalization",
822
+ "internalise": "internalize",
823
+ "internalised": "internalized",
824
+ "internalises": "internalizes",
825
+ "internalising": "internalizing",
826
+ "internationalisation": "internationalization",
827
+ "internationalise": "internationalize",
828
+ "internationalised": "internationalized",
829
+ "internationalises": "internationalizes",
830
+ "internationalising": "internationalizing",
831
+ "ionisation": "ionization",
832
+ "ionise": "ionize",
833
+ "ionised": "ionized",
834
+ "ioniser": "ionizer",
835
+ "ionisers": "ionizers",
836
+ "ionises": "ionizes",
837
+ "ionising": "ionizing",
838
+ "italicise": "italicize",
839
+ "italicised": "italicized",
840
+ "italicises": "italicizes",
841
+ "italicising": "italicizing",
842
+ "itemise": "itemize",
843
+ "itemised": "itemized",
844
+ "itemises": "itemizes",
845
+ "itemising": "itemizing",
846
+ "jeopardise": "jeopardize",
847
+ "jeopardised": "jeopardized",
848
+ "jeopardises": "jeopardizes",
849
+ "jeopardising": "jeopardizing",
850
+ "jewelled": "jeweled",
851
+ "jeweller": "jeweler",
852
+ "jewellers": "jewelers",
853
+ "jewellery": "jewelry",
854
+ "judgement": "judgment",
855
+ "kilogramme": "kilogram",
856
+ "kilogrammes": "kilograms",
857
+ "kilometre": "kilometer",
858
+ "kilometres": "kilometers",
859
+ "labelled": "labeled",
860
+ "labelling": "labeling",
861
+ "labour": "labor",
862
+ "laboured": "labored",
863
+ "labourer": "laborer",
864
+ "labourers": "laborers",
865
+ "labouring": "laboring",
866
+ "labours": "labors",
867
+ "lacklustre": "lackluster",
868
+ "legalisation": "legalization",
869
+ "legalise": "legalize",
870
+ "legalised": "legalized",
871
+ "legalises": "legalizes",
872
+ "legalising": "legalizing",
873
+ "legitimise": "legitimize",
874
+ "legitimised": "legitimized",
875
+ "legitimises": "legitimizes",
876
+ "legitimising": "legitimizing",
877
+ "leukaemia": "leukemia",
878
+ "levelled": "leveled",
879
+ "leveller": "leveler",
880
+ "levellers": "levelers",
881
+ "levelling": "leveling",
882
+ "libelled": "libeled",
883
+ "libelling": "libeling",
884
+ "libellous": "libelous",
885
+ "liberalisation": "liberalization",
886
+ "liberalise": "liberalize",
887
+ "liberalised": "liberalized",
888
+ "liberalises": "liberalizes",
889
+ "liberalising": "liberalizing",
890
+ "licence": "license",
891
+ "licenced": "licensed",
892
+ "licences": "licenses",
893
+ "licencing": "licensing",
894
+ "likeable": "likable",
895
+ "lionisation": "lionization",
896
+ "lionise": "lionize",
897
+ "lionised": "lionized",
898
+ "lionises": "lionizes",
899
+ "lionising": "lionizing",
900
+ "liquidise": "liquidize",
901
+ "liquidised": "liquidized",
902
+ "liquidiser": "liquidizer",
903
+ "liquidisers": "liquidizers",
904
+ "liquidises": "liquidizes",
905
+ "liquidising": "liquidizing",
906
+ "litre": "liter",
907
+ "litres": "liters",
908
+ "localise": "localize",
909
+ "localised": "localized",
910
+ "localises": "localizes",
911
+ "localising": "localizing",
912
+ "louvre": "louver",
913
+ "louvred": "louvered",
914
+ "louvres": "louvers",
915
+ "lustre": "luster",
916
+ "magnetise": "magnetize",
917
+ "magnetised": "magnetized",
918
+ "magnetises": "magnetizes",
919
+ "magnetising": "magnetizing",
920
+ "manoeuvrability": "maneuverability",
921
+ "manoeuvrable": "maneuverable",
922
+ "manoeuvre": "maneuver",
923
+ "manoeuvred": "maneuvered",
924
+ "manoeuvres": "maneuvers",
925
+ "manoeuvring": "maneuvering",
926
+ "manoeuvrings": "maneuverings",
927
+ "marginalisation": "marginalization",
928
+ "marginalise": "marginalize",
929
+ "marginalised": "marginalized",
930
+ "marginalises": "marginalizes",
931
+ "marginalising": "marginalizing",
932
+ "marshalled": "marshaled",
933
+ "marshalling": "marshaling",
934
+ "marvelled": "marveled",
935
+ "marvelling": "marveling",
936
+ "marvellous": "marvelous",
937
+ "marvellously": "marvelously",
938
+ "materialisation": "materialization",
939
+ "materialise": "materialize",
940
+ "materialised": "materialized",
941
+ "materialises": "materializes",
942
+ "materialising": "materializing",
943
+ "maximisation": "maximization",
944
+ "maximise": "maximize",
945
+ "maximised": "maximized",
946
+ "maximises": "maximizes",
947
+ "maximising": "maximizing",
948
+ "meagre": "meager",
949
+ "mechanisation": "mechanization",
950
+ "mechanise": "mechanize",
951
+ "mechanised": "mechanized",
952
+ "mechanises": "mechanizes",
953
+ "mechanising": "mechanizing",
954
+ "mediaeval": "medieval",
955
+ "memorialise": "memorialize",
956
+ "memorialised": "memorialized",
957
+ "memorialises": "memorializes",
958
+ "memorialising": "memorializing",
959
+ "memorise": "memorize",
960
+ "memorised": "memorized",
961
+ "memorises": "memorizes",
962
+ "memorising": "memorizing",
963
+ "mesmerise": "mesmerize",
964
+ "mesmerised": "mesmerized",
965
+ "mesmerises": "mesmerizes",
966
+ "mesmerising": "mesmerizing",
967
+ "metabolise": "metabolize",
968
+ "metabolised": "metabolized",
969
+ "metabolises": "metabolizes",
970
+ "metabolising": "metabolizing",
971
+ "metre": "meter",
972
+ "metres": "meters",
973
+ "mhm": "hmm",
974
+ "micrometre": "micrometer",
975
+ "micrometres": "micrometers",
976
+ "militarise": "militarize",
977
+ "militarised": "militarized",
978
+ "militarises": "militarizes",
979
+ "militarising": "militarizing",
980
+ "milligramme": "milligram",
981
+ "milligrammes": "milligrams",
982
+ "millilitre": "milliliter",
983
+ "millilitres": "milliliters",
984
+ "millimetre": "millimeter",
985
+ "millimetres": "millimeters",
986
+ "miniaturisation": "miniaturization",
987
+ "miniaturise": "miniaturize",
988
+ "miniaturised": "miniaturized",
989
+ "miniaturises": "miniaturizes",
990
+ "miniaturising": "miniaturizing",
991
+ "minibusses": "minibuses",
992
+ "minimise": "minimize",
993
+ "minimised": "minimized",
994
+ "minimises": "minimizes",
995
+ "minimising": "minimizing",
996
+ "misbehaviour": "misbehavior",
997
+ "misdemeanour": "misdemeanor",
998
+ "misdemeanours": "misdemeanors",
999
+ "misspelt": "misspelled",
1000
+ "mitre": "miter",
1001
+ "mitres": "miters",
1002
+ "mm": "hmm",
1003
+ "mmm": "hmm",
1004
+ "mobilisation": "mobilization",
1005
+ "mobilise": "mobilize",
1006
+ "mobilised": "mobilized",
1007
+ "mobilises": "mobilizes",
1008
+ "mobilising": "mobilizing",
1009
+ "modelled": "modeled",
1010
+ "modeller": "modeler",
1011
+ "modellers": "modelers",
1012
+ "modelling": "modeling",
1013
+ "modernise": "modernize",
1014
+ "modernised": "modernized",
1015
+ "modernises": "modernizes",
1016
+ "modernising": "modernizing",
1017
+ "moisturise": "moisturize",
1018
+ "moisturised": "moisturized",
1019
+ "moisturiser": "moisturizer",
1020
+ "moisturisers": "moisturizers",
1021
+ "moisturises": "moisturizes",
1022
+ "moisturising": "moisturizing",
1023
+ "monologue": "monolog",
1024
+ "monologues": "monologs",
1025
+ "monopolisation": "monopolization",
1026
+ "monopolise": "monopolize",
1027
+ "monopolised": "monopolized",
1028
+ "monopolises": "monopolizes",
1029
+ "monopolising": "monopolizing",
1030
+ "moralise": "moralize",
1031
+ "moralised": "moralized",
1032
+ "moralises": "moralizes",
1033
+ "moralising": "moralizing",
1034
+ "motorised": "motorized",
1035
+ "mould": "mold",
1036
+ "moulded": "molded",
1037
+ "moulder": "molder",
1038
+ "mouldered": "moldered",
1039
+ "mouldering": "moldering",
1040
+ "moulders": "molders",
1041
+ "mouldier": "moldier",
1042
+ "mouldiest": "moldiest",
1043
+ "moulding": "molding",
1044
+ "mouldings": "moldings",
1045
+ "moulds": "molds",
1046
+ "mouldy": "moldy",
1047
+ "moult": "molt",
1048
+ "moulted": "molted",
1049
+ "moulting": "molting",
1050
+ "moults": "molts",
1051
+ "moustache": "mustache",
1052
+ "moustached": "mustached",
1053
+ "moustaches": "mustaches",
1054
+ "moustachioed": "mustachioed",
1055
+ "multicoloured": "multicolored",
1056
+ "nationalisation": "nationalization",
1057
+ "nationalisations": "nationalizations",
1058
+ "nationalise": "nationalize",
1059
+ "nationalised": "nationalized",
1060
+ "nationalises": "nationalizes",
1061
+ "nationalising": "nationalizing",
1062
+ "naturalisation": "naturalization",
1063
+ "naturalise": "naturalize",
1064
+ "naturalised": "naturalized",
1065
+ "naturalises": "naturalizes",
1066
+ "naturalising": "naturalizing",
1067
+ "neighbour": "neighbor",
1068
+ "neighbourhood": "neighborhood",
1069
+ "neighbourhoods": "neighborhoods",
1070
+ "neighbouring": "neighboring",
1071
+ "neighbourliness": "neighborliness",
1072
+ "neighbourly": "neighborly",
1073
+ "neighbours": "neighbors",
1074
+ "neutralisation": "neutralization",
1075
+ "neutralise": "neutralize",
1076
+ "neutralised": "neutralized",
1077
+ "neutralises": "neutralizes",
1078
+ "neutralising": "neutralizing",
1079
+ "normalisation": "normalization",
1080
+ "normalise": "normalize",
1081
+ "normalised": "normalized",
1082
+ "normalises": "normalizes",
1083
+ "normalising": "normalizing",
1084
+ "odour": "odor",
1085
+ "odourless": "odorless",
1086
+ "odours": "odors",
1087
+ "oesophagus": "esophagus",
1088
+ "oesophaguses": "esophaguses",
1089
+ "oestrogen": "estrogen",
1090
+ "offence": "offense",
1091
+ "offences": "offenses",
1092
+ "omelette": "omelet",
1093
+ "omelettes": "omelets",
1094
+ "optimise": "optimize",
1095
+ "optimised": "optimized",
1096
+ "optimises": "optimizes",
1097
+ "optimising": "optimizing",
1098
+ "organisation": "organization",
1099
+ "organisational": "organizational",
1100
+ "organisations": "organizations",
1101
+ "organise": "organize",
1102
+ "organised": "organized",
1103
+ "organiser": "organizer",
1104
+ "organisers": "organizers",
1105
+ "organises": "organizes",
1106
+ "organising": "organizing",
1107
+ "orthopaedic": "orthopedic",
1108
+ "orthopaedics": "orthopedics",
1109
+ "ostracise": "ostracize",
1110
+ "ostracised": "ostracized",
1111
+ "ostracises": "ostracizes",
1112
+ "ostracising": "ostracizing",
1113
+ "outmanoeuvre": "outmaneuver",
1114
+ "outmanoeuvred": "outmaneuvered",
1115
+ "outmanoeuvres": "outmaneuvers",
1116
+ "outmanoeuvring": "outmaneuvering",
1117
+ "overemphasise": "overemphasize",
1118
+ "overemphasised": "overemphasized",
1119
+ "overemphasises": "overemphasizes",
1120
+ "overemphasising": "overemphasizing",
1121
+ "oxidisation": "oxidization",
1122
+ "oxidise": "oxidize",
1123
+ "oxidised": "oxidized",
1124
+ "oxidises": "oxidizes",
1125
+ "oxidising": "oxidizing",
1126
+ "paederast": "pederast",
1127
+ "paederasts": "pederasts",
1128
+ "paediatric": "pediatric",
1129
+ "paediatrician": "pediatrician",
1130
+ "paediatricians": "pediatricians",
1131
+ "paediatrics": "pediatrics",
1132
+ "paedophile": "pedophile",
1133
+ "paedophiles": "pedophiles",
1134
+ "paedophilia": "pedophilia",
1135
+ "palaeolithic": "paleolithic",
1136
+ "palaeontologist": "paleontologist",
1137
+ "palaeontologists": "paleontologists",
1138
+ "palaeontology": "paleontology",
1139
+ "panelled": "paneled",
1140
+ "panelling": "paneling",
1141
+ "panellist": "panelist",
1142
+ "panellists": "panelists",
1143
+ "paralyse": "paralyze",
1144
+ "paralysed": "paralyzed",
1145
+ "paralyses": "paralyzes",
1146
+ "paralysing": "paralyzing",
1147
+ "parcelled": "parceled",
1148
+ "parcelling": "parceling",
1149
+ "parlour": "parlor",
1150
+ "parlours": "parlors",
1151
+ "particularise": "particularize",
1152
+ "particularised": "particularized",
1153
+ "particularises": "particularizes",
1154
+ "particularising": "particularizing",
1155
+ "passivisation": "passivization",
1156
+ "passivise": "passivize",
1157
+ "passivised": "passivized",
1158
+ "passivises": "passivizes",
1159
+ "passivising": "passivizing",
1160
+ "pasteurisation": "pasteurization",
1161
+ "pasteurise": "pasteurize",
1162
+ "pasteurised": "pasteurized",
1163
+ "pasteurises": "pasteurizes",
1164
+ "pasteurising": "pasteurizing",
1165
+ "patronise": "patronize",
1166
+ "patronised": "patronized",
1167
+ "patronises": "patronizes",
1168
+ "patronising": "patronizing",
1169
+ "patronisingly": "patronizingly",
1170
+ "pedalled": "pedaled",
1171
+ "pedalling": "pedaling",
1172
+ "pedestrianisation": "pedestrianization",
1173
+ "pedestrianise": "pedestrianize",
1174
+ "pedestrianised": "pedestrianized",
1175
+ "pedestrianises": "pedestrianizes",
1176
+ "pedestrianising": "pedestrianizing",
1177
+ "penalise": "penalize",
1178
+ "penalised": "penalized",
1179
+ "penalises": "penalizes",
1180
+ "penalising": "penalizing",
1181
+ "pencilled": "penciled",
1182
+ "pencilling": "penciling",
1183
+ "personalise": "personalize",
1184
+ "personalised": "personalized",
1185
+ "personalises": "personalizes",
1186
+ "personalising": "personalizing",
1187
+ "pharmacopoeia": "pharmacopeia",
1188
+ "pharmacopoeias": "pharmacopeias",
1189
+ "philosophise": "philosophize",
1190
+ "philosophised": "philosophized",
1191
+ "philosophises": "philosophizes",
1192
+ "philosophising": "philosophizing",
1193
+ "philtre": "filter",
1194
+ "philtres": "filters",
1195
+ "phoney": "phony",
1196
+ "plagiarise": "plagiarize",
1197
+ "plagiarised": "plagiarized",
1198
+ "plagiarises": "plagiarizes",
1199
+ "plagiarising": "plagiarizing",
1200
+ "plough": "plow",
1201
+ "ploughed": "plowed",
1202
+ "ploughing": "plowing",
1203
+ "ploughman": "plowman",
1204
+ "ploughmen": "plowmen",
1205
+ "ploughs": "plows",
1206
+ "ploughshare": "plowshare",
1207
+ "ploughshares": "plowshares",
1208
+ "polarisation": "polarization",
1209
+ "polarise": "polarize",
1210
+ "polarised": "polarized",
1211
+ "polarises": "polarizes",
1212
+ "polarising": "polarizing",
1213
+ "politicisation": "politicization",
1214
+ "politicise": "politicize",
1215
+ "politicised": "politicized",
1216
+ "politicises": "politicizes",
1217
+ "politicising": "politicizing",
1218
+ "popularisation": "popularization",
1219
+ "popularise": "popularize",
1220
+ "popularised": "popularized",
1221
+ "popularises": "popularizes",
1222
+ "popularising": "popularizing",
1223
+ "pouffe": "pouf",
1224
+ "pouffes": "poufs",
1225
+ "practise": "practice",
1226
+ "practised": "practiced",
1227
+ "practises": "practices",
1228
+ "practising": "practicing",
1229
+ "praesidium": "presidium",
1230
+ "praesidiums": "presidiums",
1231
+ "pressurisation": "pressurization",
1232
+ "pressurise": "pressurize",
1233
+ "pressurised": "pressurized",
1234
+ "pressurises": "pressurizes",
1235
+ "pressurising": "pressurizing",
1236
+ "pretence": "pretense",
1237
+ "pretences": "pretenses",
1238
+ "primaeval": "primeval",
1239
+ "prioritisation": "prioritization",
1240
+ "prioritise": "prioritize",
1241
+ "prioritised": "prioritized",
1242
+ "prioritises": "prioritizes",
1243
+ "prioritising": "prioritizing",
1244
+ "privatisation": "privatization",
1245
+ "privatisations": "privatizations",
1246
+ "privatise": "privatize",
1247
+ "privatised": "privatized",
1248
+ "privatises": "privatizes",
1249
+ "privatising": "privatizing",
1250
+ "professionalisation": "professionalization",
1251
+ "professionalise": "professionalize",
1252
+ "professionalised": "professionalized",
1253
+ "professionalises": "professionalizes",
1254
+ "professionalising": "professionalizing",
1255
+ "programme": "program",
1256
+ "programmes": "programs",
1257
+ "prologue": "prolog",
1258
+ "prologues": "prologs",
1259
+ "propagandise": "propagandize",
1260
+ "propagandised": "propagandized",
1261
+ "propagandises": "propagandizes",
1262
+ "propagandising": "propagandizing",
1263
+ "proselytise": "proselytize",
1264
+ "proselytised": "proselytized",
1265
+ "proselytiser": "proselytizer",
1266
+ "proselytisers": "proselytizers",
1267
+ "proselytises": "proselytizes",
1268
+ "proselytising": "proselytizing",
1269
+ "psychoanalyse": "psychoanalyze",
1270
+ "psychoanalysed": "psychoanalyzed",
1271
+ "psychoanalyses": "psychoanalyzes",
1272
+ "psychoanalysing": "psychoanalyzing",
1273
+ "publicise": "publicize",
1274
+ "publicised": "publicized",
1275
+ "publicises": "publicizes",
1276
+ "publicising": "publicizing",
1277
+ "pulverisation": "pulverization",
1278
+ "pulverise": "pulverize",
1279
+ "pulverised": "pulverized",
1280
+ "pulverises": "pulverizes",
1281
+ "pulverising": "pulverizing",
1282
+ "pummelled": "pummel",
1283
+ "pummelling": "pummeled",
1284
+ "pyjama": "pajama",
1285
+ "pyjamas": "pajamas",
1286
+ "pzazz": "pizzazz",
1287
+ "quarrelled": "quarreled",
1288
+ "quarrelling": "quarreling",
1289
+ "radicalise": "radicalize",
1290
+ "radicalised": "radicalized",
1291
+ "radicalises": "radicalizes",
1292
+ "radicalising": "radicalizing",
1293
+ "rancour": "rancor",
1294
+ "randomise": "randomize",
1295
+ "randomised": "randomized",
1296
+ "randomises": "randomizes",
1297
+ "randomising": "randomizing",
1298
+ "rationalisation": "rationalization",
1299
+ "rationalisations": "rationalizations",
1300
+ "rationalise": "rationalize",
1301
+ "rationalised": "rationalized",
1302
+ "rationalises": "rationalizes",
1303
+ "rationalising": "rationalizing",
1304
+ "ravelled": "raveled",
1305
+ "ravelling": "raveling",
1306
+ "realisable": "realizable",
1307
+ "realisation": "realization",
1308
+ "realisations": "realizations",
1309
+ "realise": "realize",
1310
+ "realised": "realized",
1311
+ "realises": "realizes",
1312
+ "realising": "realizing",
1313
+ "recognisable": "recognizable",
1314
+ "recognisably": "recognizably",
1315
+ "recognisance": "recognizance",
1316
+ "recognise": "recognize",
1317
+ "recognised": "recognized",
1318
+ "recognises": "recognizes",
1319
+ "recognising": "recognizing",
1320
+ "reconnoitre": "reconnoiter",
1321
+ "reconnoitred": "reconnoitered",
1322
+ "reconnoitres": "reconnoiters",
1323
+ "reconnoitring": "reconnoitering",
1324
+ "refuelled": "refueled",
1325
+ "refuelling": "refueling",
1326
+ "regularisation": "regularization",
1327
+ "regularise": "regularize",
1328
+ "regularised": "regularized",
1329
+ "regularises": "regularizes",
1330
+ "regularising": "regularizing",
1331
+ "remodelled": "remodeled",
1332
+ "remodelling": "remodeling",
1333
+ "remould": "remold",
1334
+ "remoulded": "remolded",
1335
+ "remoulding": "remolding",
1336
+ "remoulds": "remolds",
1337
+ "reorganisation": "reorganization",
1338
+ "reorganisations": "reorganizations",
1339
+ "reorganise": "reorganize",
1340
+ "reorganised": "reorganized",
1341
+ "reorganises": "reorganizes",
1342
+ "reorganising": "reorganizing",
1343
+ "revelled": "reveled",
1344
+ "reveller": "reveler",
1345
+ "revellers": "revelers",
1346
+ "revelling": "reveling",
1347
+ "revitalise": "revitalize",
1348
+ "revitalised": "revitalized",
1349
+ "revitalises": "revitalizes",
1350
+ "revitalising": "revitalizing",
1351
+ "revolutionise": "revolutionize",
1352
+ "revolutionised": "revolutionized",
1353
+ "revolutionises": "revolutionizes",
1354
+ "revolutionising": "revolutionizing",
1355
+ "rhapsodise": "rhapsodize",
1356
+ "rhapsodised": "rhapsodized",
1357
+ "rhapsodises": "rhapsodizes",
1358
+ "rhapsodising": "rhapsodizing",
1359
+ "rigour": "rigor",
1360
+ "rigours": "rigors",
1361
+ "ritualised": "ritualized",
1362
+ "rivalled": "rivaled",
1363
+ "rivalling": "rivaling",
1364
+ "romanticise": "romanticize",
1365
+ "romanticised": "romanticized",
1366
+ "romanticises": "romanticizes",
1367
+ "romanticising": "romanticizing",
1368
+ "rumour": "rumor",
1369
+ "rumoured": "rumored",
1370
+ "rumours": "rumors",
1371
+ "sabre": "saber",
1372
+ "sabres": "sabers",
1373
+ "saltpetre": "saltpeter",
1374
+ "sanitise": "sanitize",
1375
+ "sanitised": "sanitized",
1376
+ "sanitises": "sanitizes",
1377
+ "sanitising": "sanitizing",
1378
+ "satirise": "satirize",
1379
+ "satirised": "satirized",
1380
+ "satirises": "satirizes",
1381
+ "satirising": "satirizing",
1382
+ "saviour": "savior",
1383
+ "saviours": "saviors",
1384
+ "savour": "savor",
1385
+ "savoured": "savored",
1386
+ "savouries": "savories",
1387
+ "savouring": "savoring",
1388
+ "savours": "savors",
1389
+ "savoury": "savory",
1390
+ "scandalise": "scandalize",
1391
+ "scandalised": "scandalized",
1392
+ "scandalises": "scandalizes",
1393
+ "scandalising": "scandalizing",
1394
+ "sceptic": "skeptic",
1395
+ "sceptical": "skeptical",
1396
+ "sceptically": "skeptically",
1397
+ "scepticism": "skepticism",
1398
+ "sceptics": "skeptics",
1399
+ "sceptre": "scepter",
1400
+ "sceptres": "scepters",
1401
+ "scrutinise": "scrutinize",
1402
+ "scrutinised": "scrutinized",
1403
+ "scrutinises": "scrutinizes",
1404
+ "scrutinising": "scrutinizing",
1405
+ "secularisation": "secularization",
1406
+ "secularise": "secularize",
1407
+ "secularised": "secularized",
1408
+ "secularises": "secularizes",
1409
+ "secularising": "secularizing",
1410
+ "sensationalise": "sensationalize",
1411
+ "sensationalised": "sensationalized",
1412
+ "sensationalises": "sensationalizes",
1413
+ "sensationalising": "sensationalizing",
1414
+ "sensitise": "sensitize",
1415
+ "sensitised": "sensitized",
1416
+ "sensitises": "sensitizes",
1417
+ "sensitising": "sensitizing",
1418
+ "sentimentalise": "sentimentalize",
1419
+ "sentimentalised": "sentimentalized",
1420
+ "sentimentalises": "sentimentalizes",
1421
+ "sentimentalising": "sentimentalizing",
1422
+ "sepulchre": "sepulcher",
1423
+ "sepulchres": "sepulchers",
1424
+ "serialisation": "serialization",
1425
+ "serialisations": "serializations",
1426
+ "serialise": "serialize",
1427
+ "serialised": "serialized",
1428
+ "serialises": "serializes",
1429
+ "serialising": "serializing",
1430
+ "sermonise": "sermonize",
1431
+ "sermonised": "sermonized",
1432
+ "sermonises": "sermonizes",
1433
+ "sermonising": "sermonizing",
1434
+ "sheikh": "sheik",
1435
+ "shovelled": "shoveled",
1436
+ "shovelling": "shoveling",
1437
+ "shrivelled": "shriveled",
1438
+ "shrivelling": "shriveling",
1439
+ "signalise": "signalize",
1440
+ "signalised": "signalized",
1441
+ "signalises": "signalizes",
1442
+ "signalising": "signalizing",
1443
+ "signalled": "signaled",
1444
+ "signalling": "signaling",
1445
+ "smoulder": "smolder",
1446
+ "smouldered": "smoldered",
1447
+ "smouldering": "smoldering",
1448
+ "smoulders": "smolders",
1449
+ "snivelled": "sniveled",
1450
+ "snivelling": "sniveling",
1451
+ "snorkelled": "snorkeled",
1452
+ "snorkelling": "snorkeling",
1453
+ "snowplough": "snowplow",
1454
+ "snowploughs": "snowplows",
1455
+ "socialisation": "socialization",
1456
+ "socialise": "socialize",
1457
+ "socialised": "socialized",
1458
+ "socialises": "socializes",
1459
+ "socialising": "socializing",
1460
+ "sodomise": "sodomize",
1461
+ "sodomised": "sodomized",
1462
+ "sodomises": "sodomizes",
1463
+ "sodomising": "sodomizing",
1464
+ "solemnise": "solemnize",
1465
+ "solemnised": "solemnized",
1466
+ "solemnises": "solemnizes",
1467
+ "solemnising": "solemnizing",
1468
+ "sombre": "somber",
1469
+ "specialisation": "specialization",
1470
+ "specialisations": "specializations",
1471
+ "specialise": "specialize",
1472
+ "specialised": "specialized",
1473
+ "specialises": "specializes",
1474
+ "specialising": "specializing",
1475
+ "spectre": "specter",
1476
+ "spectres": "specters",
1477
+ "spiralled": "spiraled",
1478
+ "spiralling": "spiraling",
1479
+ "splendour": "splendor",
1480
+ "splendours": "splendors",
1481
+ "squirrelled": "squirreled",
1482
+ "squirrelling": "squirreling",
1483
+ "stabilisation": "stabilization",
1484
+ "stabilise": "stabilize",
1485
+ "stabilised": "stabilized",
1486
+ "stabiliser": "stabilizer",
1487
+ "stabilisers": "stabilizers",
1488
+ "stabilises": "stabilizes",
1489
+ "stabilising": "stabilizing",
1490
+ "standardisation": "standardization",
1491
+ "standardise": "standardize",
1492
+ "standardised": "standardized",
1493
+ "standardises": "standardizes",
1494
+ "standardising": "standardizing",
1495
+ "stencilled": "stenciled",
1496
+ "stencilling": "stenciling",
1497
+ "sterilisation": "sterilization",
1498
+ "sterilisations": "sterilizations",
1499
+ "sterilise": "sterilize",
1500
+ "sterilised": "sterilized",
1501
+ "steriliser": "sterilizer",
1502
+ "sterilisers": "sterilizers",
1503
+ "sterilises": "sterilizes",
1504
+ "sterilising": "sterilizing",
1505
+ "stigmatisation": "stigmatization",
1506
+ "stigmatise": "stigmatize",
1507
+ "stigmatised": "stigmatized",
1508
+ "stigmatises": "stigmatizes",
1509
+ "stigmatising": "stigmatizing",
1510
+ "storey": "story",
1511
+ "storeys": "stories",
1512
+ "subsidisation": "subsidization",
1513
+ "subsidise": "subsidize",
1514
+ "subsidised": "subsidized",
1515
+ "subsidiser": "subsidizer",
1516
+ "subsidisers": "subsidizers",
1517
+ "subsidises": "subsidizes",
1518
+ "subsidising": "subsidizing",
1519
+ "succour": "succor",
1520
+ "succoured": "succored",
1521
+ "succouring": "succoring",
1522
+ "succours": "succors",
1523
+ "sulphate": "sulfate",
1524
+ "sulphates": "sulfates",
1525
+ "sulphide": "sulfide",
1526
+ "sulphides": "sulfides",
1527
+ "sulphur": "sulfur",
1528
+ "sulphurous": "sulfurous",
1529
+ "summarise": "summarize",
1530
+ "summarised": "summarized",
1531
+ "summarises": "summarizes",
1532
+ "summarising": "summarizing",
1533
+ "swivelled": "swiveled",
1534
+ "swivelling": "swiveling",
1535
+ "symbolise": "symbolize",
1536
+ "symbolised": "symbolized",
1537
+ "symbolises": "symbolizes",
1538
+ "symbolising": "symbolizing",
1539
+ "sympathise": "sympathize",
1540
+ "sympathised": "sympathized",
1541
+ "sympathiser": "sympathizer",
1542
+ "sympathisers": "sympathizers",
1543
+ "sympathises": "sympathizes",
1544
+ "sympathising": "sympathizing",
1545
+ "synchronisation": "synchronization",
1546
+ "synchronise": "synchronize",
1547
+ "synchronised": "synchronized",
1548
+ "synchronises": "synchronizes",
1549
+ "synchronising": "synchronizing",
1550
+ "synthesise": "synthesize",
1551
+ "synthesised": "synthesized",
1552
+ "synthesiser": "synthesizer",
1553
+ "synthesisers": "synthesizers",
1554
+ "synthesises": "synthesizes",
1555
+ "synthesising": "synthesizing",
1556
+ "syphon": "siphon",
1557
+ "syphoned": "siphoned",
1558
+ "syphoning": "siphoning",
1559
+ "syphons": "siphons",
1560
+ "systematisation": "systematization",
1561
+ "systematise": "systematize",
1562
+ "systematised": "systematized",
1563
+ "systematises": "systematizes",
1564
+ "systematising": "systematizing",
1565
+ "tantalise": "tantalize",
1566
+ "tantalised": "tantalized",
1567
+ "tantalises": "tantalizes",
1568
+ "tantalising": "tantalizing",
1569
+ "tantalisingly": "tantalizingly",
1570
+ "tasselled": "tasseled",
1571
+ "technicolour": "technicolor",
1572
+ "temporise": "temporize",
1573
+ "temporised": "temporized",
1574
+ "temporises": "temporizes",
1575
+ "temporising": "temporizing",
1576
+ "tenderise": "tenderize",
1577
+ "tenderised": "tenderized",
1578
+ "tenderises": "tenderizes",
1579
+ "tenderising": "tenderizing",
1580
+ "terrorise": "terrorize",
1581
+ "terrorised": "terrorized",
1582
+ "terrorises": "terrorizes",
1583
+ "terrorising": "terrorizing",
1584
+ "theatre": "theater",
1585
+ "theatregoer": "theatergoer",
1586
+ "theatregoers": "theatergoers",
1587
+ "theatres": "theaters",
1588
+ "theorise": "theorize",
1589
+ "theorised": "theorized",
1590
+ "theorises": "theorizes",
1591
+ "theorising": "theorizing",
1592
+ "tonne": "ton",
1593
+ "tonnes": "tons",
1594
+ "towelled": "toweled",
1595
+ "towelling": "toweling",
1596
+ "toxaemia": "toxemia",
1597
+ "tranquillise": "tranquilize",
1598
+ "tranquillised": "tranquilized",
1599
+ "tranquilliser": "tranquilizer",
1600
+ "tranquillisers": "tranquilizers",
1601
+ "tranquillises": "tranquilizes",
1602
+ "tranquillising": "tranquilizing",
1603
+ "tranquillity": "tranquility",
1604
+ "tranquillize": "tranquilize",
1605
+ "tranquillized": "tranquilized",
1606
+ "tranquillizer": "tranquilizer",
1607
+ "tranquillizers": "tranquilizers",
1608
+ "tranquillizes": "tranquilizes",
1609
+ "tranquillizing": "tranquilizing",
1610
+ "tranquilly": "tranquility",
1611
+ "transistorised": "transistorized",
1612
+ "traumatise": "traumatize",
1613
+ "traumatised": "traumatized",
1614
+ "traumatises": "traumatizes",
1615
+ "traumatising": "traumatizing",
1616
+ "travelled": "traveled",
1617
+ "traveller": "traveler",
1618
+ "travellers": "travelers",
1619
+ "travelling": "traveling",
1620
+ "travelog": "travelogue",
1621
+ "travelogs": "travelogues",
1622
+ "trialled": "trialed",
1623
+ "trialling": "trialing",
1624
+ "tricolour": "tricolor",
1625
+ "tricolours": "tricolors",
1626
+ "trivialise": "trivialize",
1627
+ "trivialised": "trivialized",
1628
+ "trivialises": "trivializes",
1629
+ "trivialising": "trivializing",
1630
+ "tumour": "tumor",
1631
+ "tumours": "tumors",
1632
+ "tunnelled": "tunneled",
1633
+ "tunnelling": "tunneling",
1634
+ "tyrannise": "tyrannize",
1635
+ "tyrannised": "tyrannized",
1636
+ "tyrannises": "tyrannizes",
1637
+ "tyrannising": "tyrannizing",
1638
+ "tyre": "tire",
1639
+ "tyres": "tires",
1640
+ "unauthorised": "unauthorized",
1641
+ "uncivilised": "uncivilized",
1642
+ "underutilised": "underutilized",
1643
+ "unequalled": "unequaled",
1644
+ "unfavourable": "unfavorable",
1645
+ "unfavourably": "unfavorably",
1646
+ "unionisation": "unionization",
1647
+ "unionise": "unionize",
1648
+ "unionised": "unionized",
1649
+ "unionises": "unionizes",
1650
+ "unionising": "unionizing",
1651
+ "unorganised": "unorganized",
1652
+ "unravelled": "unraveled",
1653
+ "unravelling": "unraveling",
1654
+ "unrecognisable": "unrecognizable",
1655
+ "unrecognised": "unrecognized",
1656
+ "unrivalled": "unrivaled",
1657
+ "unsavoury": "unsavory",
1658
+ "untrammelled": "untrammeled",
1659
+ "urbanisation": "urbanization",
1660
+ "urbanise": "urbanize",
1661
+ "urbanised": "urbanized",
1662
+ "urbanises": "urbanizes",
1663
+ "urbanising": "urbanizing",
1664
+ "utilisable": "utilizable",
1665
+ "utilisation": "utilization",
1666
+ "utilise": "utilize",
1667
+ "utilised": "utilized",
1668
+ "utilises": "utilizes",
1669
+ "utilising": "utilizing",
1670
+ "valour": "valor",
1671
+ "vandalise": "vandalize",
1672
+ "vandalised": "vandalized",
1673
+ "vandalises": "vandalizes",
1674
+ "vandalising": "vandalizing",
1675
+ "vaporisation": "vaporization",
1676
+ "vaporise": "vaporize",
1677
+ "vaporised": "vaporized",
1678
+ "vaporises": "vaporizes",
1679
+ "vaporising": "vaporizing",
1680
+ "vapour": "vapor",
1681
+ "vapours": "vapors",
1682
+ "verbalise": "verbalize",
1683
+ "verbalised": "verbalized",
1684
+ "verbalises": "verbalizes",
1685
+ "verbalising": "verbalizing",
1686
+ "victimisation": "victimization",
1687
+ "victimise": "victimize",
1688
+ "victimised": "victimized",
1689
+ "victimises": "victimizes",
1690
+ "victimising": "victimizing",
1691
+ "videodisc": "videodisk",
1692
+ "videodiscs": "videodisks",
1693
+ "vigour": "vigor",
1694
+ "visualisation": "visualization",
1695
+ "visualisations": "visualizations",
1696
+ "visualise": "visualize",
1697
+ "visualised": "visualized",
1698
+ "visualises": "visualizes",
1699
+ "visualising": "visualizing",
1700
+ "vocalisation": "vocalization",
1701
+ "vocalisations": "vocalizations",
1702
+ "vocalise": "vocalize",
1703
+ "vocalised": "vocalized",
1704
+ "vocalises": "vocalizes",
1705
+ "vocalising": "vocalizing",
1706
+ "vulcanised": "vulcanized",
1707
+ "vulgarisation": "vulgarization",
1708
+ "vulgarise": "vulgarize",
1709
+ "vulgarised": "vulgarized",
1710
+ "vulgarises": "vulgarizes",
1711
+ "vulgarising": "vulgarizing",
1712
+ "waggon": "wagon",
1713
+ "waggons": "wagons",
1714
+ "watercolour": "watercolor",
1715
+ "watercolours": "watercolors",
1716
+ "weaselled": "weaseled",
1717
+ "weaselling": "weaseling",
1718
+ "westernisation": "westernization",
1719
+ "westernise": "westernize",
1720
+ "westernised": "westernized",
1721
+ "westernises": "westernizes",
1722
+ "westernising": "westernizing",
1723
+ "womanise": "womanize",
1724
+ "womanised": "womanized",
1725
+ "womaniser": "womanizer",
1726
+ "womanisers": "womanizers",
1727
+ "womanises": "womanizes",
1728
+ "womanising": "womanizing",
1729
+ "woollen": "woolen",
1730
+ "woollens": "woolens",
1731
+ "woollies": "woolies",
1732
+ "woolly": "wooly",
1733
+ "worshipped": "worshiped",
1734
+ "worshipper": "worshiper",
1735
+ "worshipping": "worshiping",
1736
+ "yodelled": "yodeled",
1737
+ "yodelling": "yodeling",
1738
+ "yoghourt": "yogurt",
1739
+ "yoghourts": "yogurts",
1740
+ "yoghurt": "yogurt",
1741
+ "yoghurts": "yogurts"
1742
+ }
preprocessor_config.json ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:705c29d8f8f9970b7fd384c347f0309c5b784dd244a64eb1f721f226ae5fb4f8
3
+ size 3055754841
run.sh ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # source: https://github.com/kamfonas/whisper-fine-tuning-event/compare/master...minor-mods-by-farsipal
2
+
3
+ python run_speech_recognition_seq2seq_streaming.py \
4
+ --model_name_or_path 'openai/whisper-medium' \
5
+ --model_revision main \
6
+ --do_train True \
7
+ --do_eval True \
8
+ --use_auth_token False \
9
+ --freeze_feature_encoder False \
10
+ --freeze_encoder False \
11
+ --model_index_name 'Whisper Medium Danish (CV11 + FLEAURS)' \
12
+ --dataset_name 'mozilla-foundation/common_voice_11_0,google/fleurs' \
13
+ --dataset_config_name 'da,da_dk' \
14
+ --train_split_name 'train+validation,train+validation' \
15
+ --eval_split_name 'test,-' \
16
+ --text_column_name 'sentence,raw_transcription' \
17
+ --audio_column_name 'audio,audio' \
18
+ --streaming False \
19
+ --max_duration_in_seconds 30 \
20
+ --do_lower_case False \
21
+ --do_remove_punctuation False \
22
+ --do_normalize_eval True \
23
+ --language danish \
24
+ --task transcribe \
25
+ --shuffle_buffer_size 500 \
26
+ --output_dir './' \
27
+ --overwrite_output_dir True \
28
+ --per_device_train_batch_size 32 \
29
+ --gradient_accumulation_steps 1 \
30
+ --learning_rate 8e-6 \
31
+ --dropout 0.1 \
32
+ --warmup_steps 500 \
33
+ --max_steps 10000 \
34
+ --eval_steps 1000 \
35
+ --gradient_checkpointing True \
36
+ --cache_dir '~/.cache' \
37
+ --fp16 True \
38
+ --evaluation_strategy steps \
39
+ --per_device_eval_batch_size 8 \
40
+ --predict_with_generate True \
41
+ --generation_max_length 225 \
42
+ --save_steps 1000 \
43
+ --logging_steps 25 \
44
+ --report_to tensorboard \
45
+ --load_best_model_at_end True \
46
+ --metric_for_best_model wer \
47
+ --greater_is_better False \
48
+ --push_to_hub
run_speech_recognition_seq2seq_streaming.py ADDED
@@ -0,0 +1,769 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for sequence to sequence speech recognition
18
+ with 🤗 Datasets' streaming mode.
19
+ """
20
+ # This program was modified by Michael Kamfonas (mkamfonas@infokarta.com) on Dec 11 2022
21
+ # - added options for dropout, gradient_checkpointing, use_cache, stopping_strategy and streaming
22
+ # - restructured it to enable both streaming and non-streaming modes
23
+ # - allows concatenation of multiple datasets (single-string comma-separated) for interleaving
24
+ # The following params must have the same number of comma-separated (,) elements:
25
+ # dataset_name,
26
+ # dataset_config_name,
27
+ # train_split_name and eval_split_name (each element plus-separated (+) for multiple splits),
28
+ # text_column_name and audio_column_name
29
+
30
+
31
+ import logging
32
+ import os
33
+ import sys
34
+ from dataclasses import dataclass, field
35
+ from typing import Any, Dict, List, Optional, Union
36
+
37
+ import datasets
38
+ import torch
39
+ from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset
40
+ from torch.utils.data import IterableDataset
41
+
42
+ import evaluate
43
+ import transformers
44
+ from transformers import (
45
+ AutoConfig,
46
+ AutoFeatureExtractor,
47
+ AutoModelForSpeechSeq2Seq,
48
+ AutoProcessor,
49
+ AutoTokenizer,
50
+ HfArgumentParser,
51
+ Seq2SeqTrainer,
52
+ Seq2SeqTrainingArguments,
53
+ TrainerCallback,
54
+ set_seed,
55
+ )
56
+ from transformers.trainer_pt_utils import IterableDatasetShard
57
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
58
+ from transformers.utils import check_min_version, send_example_telemetry
59
+ from transformers.utils.versions import require_version
60
+ from transformers.models.whisper.english_normalizer import BasicTextNormalizer
61
+
62
+ TEXT_COL_NAME="text"
63
+ AUDIO_COL_NAME="audio"
64
+
65
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
66
+ check_min_version("4.25.0.dev0")
67
+
68
+ require_version("datasets>=1.18.2", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
69
+
70
+ logger = logging.getLogger(__name__)
71
+
72
+
73
+ @dataclass
74
+ class ModelArguments:
75
+ """
76
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
77
+ """
78
+
79
+ model_name_or_path: str = field(
80
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
81
+ )
82
+ config_name: Optional[str] = field(
83
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
84
+ )
85
+ tokenizer_name: Optional[str] = field(
86
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
87
+ )
88
+ feature_extractor_name: Optional[str] = field(
89
+ default=None, metadata={"help": "feature extractor name or path if not the same as model_name"}
90
+ )
91
+ cache_dir: Optional[str] = field(
92
+ default=None,
93
+ metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
94
+ )
95
+ use_fast_tokenizer: bool = field(
96
+ default=True,
97
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
98
+ )
99
+ model_revision: str = field(
100
+ default="main",
101
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
102
+ )
103
+ use_auth_token: bool = field(
104
+ default=False,
105
+ metadata={
106
+ "help": (
107
+ "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
108
+ "with private models)."
109
+ )
110
+ },
111
+ )
112
+ freeze_feature_encoder: bool = field(
113
+ default=True, metadata={"help": "Deprecated - Whether to freeze the feature encoder layers of the model."}
114
+ )
115
+ freeze_encoder: bool = field(
116
+ default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."}
117
+ )
118
+ forced_decoder_ids: List[List[int]] = field(
119
+ default=None,
120
+ metadata={
121
+ "help": (
122
+ "A list of pairs of integers which indicates a mapping from generation indices to token indices "
123
+ "that will be forced before sampling. For example, [[0, 123]] means the first generated token "
124
+ "will always be a token of index 123."
125
+ )
126
+ },
127
+ )
128
+ suppress_tokens: List[int] = field(
129
+ default=None, metadata={"help": "A list of tokens that will be suppressed at generation."}
130
+ )
131
+ model_index_name: str = field(default=None, metadata={"help": "Pretty name for the model card."})
132
+
133
+ ## added by Michael Kamfonas
134
+ use_cache: bool = field(
135
+ default=False, metadata={"help": "Whether to use cache."}
136
+ )
137
+
138
+ dropout: float = field(
139
+ default = 0.0, metadata = {"help": "dropout probability."}
140
+ )
141
+
142
+ attention_dropout: float = field(
143
+ default = 0.0, metadata = {"help": "attention_dropout probability."}
144
+ )
145
+
146
+
147
+
148
+ @dataclass
149
+ class DataTrainingArguments:
150
+ """
151
+ Arguments pertaining to what data we are going to input our model for training and eval.
152
+ """
153
+
154
+ dataset_name: str = field(
155
+ default=None,
156
+ metadata={"help": "The name of the dataset to use (via the datasets library)."}
157
+ )
158
+ dataset_config_name: Optional[str] = field(
159
+ default=None,
160
+ metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
161
+ )
162
+ text_column: Optional[str] = field(
163
+ default=None,
164
+ metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
165
+ )
166
+ max_train_samples: Optional[int] = field(
167
+ default=None,
168
+ metadata={
169
+ "help": (
170
+ "For debugging purposes or quicker training, truncate the number of training examples to this "
171
+ "value if set."
172
+ )
173
+ },
174
+ )
175
+ max_eval_samples: Optional[int] = field(
176
+ default=None,
177
+ metadata={
178
+ "help": (
179
+ "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
180
+ "value if set."
181
+ )
182
+ },
183
+ )
184
+ audio_column_name: str = field(
185
+ default="audio",
186
+ metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
187
+ )
188
+ text_column_name: str = field(
189
+ default="text",
190
+ metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
191
+ )
192
+ max_duration_in_seconds: float = field(
193
+ default=20.0,
194
+ metadata={
195
+ "help": (
196
+ "Truncate audio files that are longer than `max_duration_in_seconds` seconds to"
197
+ " 'max_duration_in_seconds`"
198
+ )
199
+ },
200
+ )
201
+ min_duration_in_seconds: float = field(
202
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
203
+ )
204
+ train_split_name: str = field(
205
+ default="train",
206
+ metadata={
207
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
208
+ },
209
+ )
210
+ eval_split_name: str = field(
211
+ default="test",
212
+ metadata={
213
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
214
+ },
215
+ )
216
+ do_lower_case: bool = field(
217
+ default=False,
218
+ metadata={"help": "Whether the target text should be lower cased."},
219
+ )
220
+ do_remove_punctuation: bool = field(
221
+ default=False,
222
+ metadata={"help": "Whether the target text should be striped of punctuation."},
223
+ )
224
+ do_normalize_eval: bool = field(
225
+ default=True,
226
+ metadata={"help": "Whether to normalise the references and predictions in the eval WER calculation."},
227
+ )
228
+ language: str = field(
229
+ default=None,
230
+ metadata={
231
+ "help": (
232
+ "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
233
+ "only. For English speech recognition, it should be set to `None`."
234
+ )
235
+ },
236
+ )
237
+ task: str = field(
238
+ default="transcribe",
239
+ metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
240
+ )
241
+ shuffle_buffer_size: Optional[int] = field(
242
+ default=500,
243
+ metadata={
244
+ "help": (
245
+ "The number of streamed examples to download before shuffling them. The large the buffer, "
246
+ "the closer it is to real offline shuffling."
247
+ )
248
+ },
249
+ )
250
+ stopping_strategy: Optional[str] = field(
251
+ default="all_exhausted",
252
+ metadata={
253
+ "help": "Strategy used to consume interleaved data. Default = 'all_exhausted'"
254
+ }
255
+ )
256
+ streaming: bool = field(
257
+ default=True,
258
+ metadata={"help": "Whether to use streaming mode to load and pre-process the data."},
259
+ )
260
+
261
+ @dataclass
262
+ class DataCollatorSpeechSeq2SeqWithPadding:
263
+ """
264
+ Data collator that will dynamically pad the inputs received.
265
+ Args:
266
+ processor ([`WhisperProcessor`])
267
+ The processor used for processing the data.
268
+ decoder_start_token_id (`int`)
269
+ The begin-of-sentence of the decoder.
270
+ """
271
+
272
+ processor: Any
273
+ decoder_start_token_id: int
274
+
275
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
276
+ # split inputs and labels since they have to be of different lengths and need
277
+ # different padding methods
278
+ model_input_name = self.processor.model_input_names[0]
279
+ input_features = [{model_input_name: feature[model_input_name]} for feature in features]
280
+ label_features = [{"input_ids": feature["labels"]} for feature in features]
281
+
282
+ batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
283
+
284
+ labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
285
+
286
+ # replace padding with -100 to ignore loss correctly
287
+ labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
288
+
289
+ # if bos token is appended in previous tokenization step,
290
+ # cut bos token here as it's append later anyways
291
+ if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
292
+ labels = labels[:, 1:]
293
+
294
+ batch["labels"] = labels
295
+
296
+ return batch
297
+
298
+
299
+ def load_streaming_dataset(dataset_name, dataset_config_name, split="train", **kwargs):
300
+ """
301
+ Utility function to load a dataset in streaming mode. For datasets with multiple splits,
302
+ each split is loaded individually and then splits combined by taking alternating examples from
303
+ each (interleaving).
304
+ """
305
+ if "+" in split:
306
+ # load multiple splits separated by the `+` symbol with streaming mode
307
+ dataset_splits = [
308
+ load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=True, **kwargs)
309
+ for split_name in split.split("+")
310
+ ]
311
+ # interleave multiple splits to form one dataset
312
+ interleaved_dataset = interleave_datasets(dataset_splits)
313
+ return interleaved_dataset
314
+ else:
315
+ # load a single split *with* streaming mode
316
+ dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=True, **kwargs)
317
+ return dataset
318
+
319
+ def load_multiple_streaming_datasets(
320
+ dataset_names: List,
321
+ dataset_config_names: List,
322
+ splits: Optional[List] = None,
323
+ text_column_names: Optional[List] = None,
324
+ audio_column_names: Optional[List] = None,
325
+ sampling_rate: Optional[int] = 16000,
326
+ stopping_strategy: Optional[str] = "all_exhausted",
327
+ streaming = True,
328
+ **kwargs
329
+ ):
330
+
331
+ if len(dataset_names) != len(dataset_config_names):
332
+ raise ValueError(
333
+ f"Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and"
334
+ f" {len(dataset_config_names)} configs."
335
+ )
336
+
337
+ if splits is not None and len(splits) != len(dataset_names):
338
+ raise ValueError(
339
+ f"Ensure one train_split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits."
340
+ )
341
+
342
+ if text_column_names is not None and len(text_column_names) != len(dataset_names):
343
+ raise ValueError(
344
+ f"Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and"
345
+ f" {len(text_column_names)} text column names."
346
+ )
347
+
348
+ if audio_column_names is not None and len(audio_column_names) != len(dataset_names):
349
+ raise ValueError(
350
+ f"Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and"
351
+ f" {len(audio_column_names)} text column names."
352
+ )
353
+
354
+ splits = splits if splits is not None \
355
+ else ["train" for i in range(len(dataset_names))]
356
+
357
+ text_column_names = (
358
+ text_column_names if text_column_names is not None \
359
+ else [TEXT_COL_NAME for i in range(len(dataset_names))]
360
+ )
361
+
362
+ audio_column_names = (
363
+ audio_column_names if audio_column_names is not None \
364
+ else [AUDIO_COL_NAME for i in range(len(dataset_names))]
365
+ )
366
+
367
+ all_data_splits = []
368
+ # iterate over the datasets we want to interleave
369
+ for dset, cfgNm, splt, txtColNm, audColNm in zip(dataset_names,dataset_config_names,\
370
+ splits,text_column_names, audio_column_names):
371
+
372
+ dset_splits = [load_dataset(dset, cfgNm, split=c, streaming=streaming, **kwargs) \
373
+ for c in splt.split('+') if c != '-']
374
+
375
+ if streaming:
376
+ dset_splits = [ds if TEXT_COL_NAME in ds.features else ds.rename_column(txtColNm, TEXT_COL_NAME) \
377
+ for ds in dset_splits ]
378
+ dset_splits = [ds if AUDIO_COL_NAME in ds.features else ds.rename_column(audColNm, AUDIO_COL_NAME) \
379
+ for ds in dset_splits]
380
+
381
+ if len(dset_splits)>0 and sampling_rate != next(iter(dset_splits[0]))[AUDIO_COL_NAME]['sampling_rate']:
382
+ dset_splits = [ds.cast_column(AUDIO_COL_NAME, Audio(sampling_rate)) for ds in dset_splits]
383
+ else:
384
+
385
+ dset_splits = [ds if TEXT_COL_NAME in ds.column_names else ds.rename_column(txtColNm, TEXT_COL_NAME) \
386
+ for ds in dset_splits ]
387
+ dset_splits = [ds if AUDIO_COL_NAME in ds.column_names else ds.rename_column(audColNm, AUDIO_COL_NAME) \
388
+ for ds in dset_splits]
389
+
390
+ if len(dset_splits)>0 and sampling_rate != next(iter(dset_splits[0]))[AUDIO_COL_NAME]['sampling_rate']:
391
+ dset_splits = [ds.cast_column(AUDIO_COL_NAME, Audio(sampling_rate)) for ds in dset_splits]
392
+
393
+ cols2keep = set([AUDIO_COL_NAME, TEXT_COL_NAME])
394
+
395
+ dset_splits = [ds.remove_columns(set(ds.features.keys()) - cols2keep) for ds in dset_splits]
396
+
397
+ all_data_splits += dset_splits
398
+
399
+ return interleave_datasets(all_data_splits, stopping_strategy=stopping_strategy)
400
+
401
+ def main():
402
+ # 1. Parse input arguments
403
+ # See all possible arguments in src/transformers/training_args.py
404
+ # or by passing the --help flag to this script.
405
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
406
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
407
+
408
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
409
+ # If we pass only one argument to the script and it's the path to a json file,
410
+ # let's parse it to get our arguments.
411
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
412
+ else:
413
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
414
+
415
+ # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
416
+ # information sent is the one passed as arguments along with your Python/PyTorch versions.
417
+ send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args)
418
+
419
+ # 2. Setup logging
420
+ logging.basicConfig(
421
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
422
+ datefmt="%m/%d/%Y %H:%M:%S",
423
+ handlers=[logging.StreamHandler(sys.stdout)],
424
+ )
425
+ log_level = training_args.get_process_log_level()
426
+ logger.setLevel(log_level)
427
+ datasets.utils.logging.set_verbosity(log_level)
428
+ transformers.utils.logging.set_verbosity(log_level)
429
+ transformers.utils.logging.enable_default_handler()
430
+ transformers.utils.logging.enable_explicit_format()
431
+
432
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
433
+
434
+ # Log on each process the small summary:
435
+ logger.warning(
436
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
437
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
438
+ )
439
+ logger.info(f"Training/evaluation parameters {training_args}")
440
+
441
+ # Set the verbosity to info of the Transformers logger (on main process only):
442
+ if is_main_process(training_args.local_rank):
443
+ transformers.utils.logging.set_verbosity_info()
444
+ logger.info("Training/evaluation parameters %s", training_args)
445
+
446
+ # 3. Detecting last checkpoint and eventually continue from last checkpoint
447
+ last_checkpoint = None
448
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
449
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
450
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
451
+ raise ValueError(
452
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
453
+ "Use --overwrite_output_dir to overcome."
454
+ )
455
+ elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
456
+ logger.info(
457
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
458
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
459
+ )
460
+
461
+ # Set seed before initializing model.
462
+ set_seed(training_args.seed)
463
+
464
+ # 5. Load pretrained model, tokenizer, and feature extractor
465
+ #
466
+ # Distributed training:
467
+ # The .from_pretrained methods guarantee that only one local process can concurrently
468
+ config = AutoConfig.from_pretrained(
469
+ model_args.config_name if model_args.config_name else model_args.model_name_or_path,
470
+ cache_dir=model_args.cache_dir,
471
+ revision=model_args.model_revision,
472
+ use_auth_token=True if model_args.use_auth_token else None,
473
+ )
474
+
475
+ config.update({ "forced_decoder_ids": model_args.forced_decoder_ids,
476
+ "suppress_tokens": model_args.suppress_tokens})
477
+
478
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
479
+ model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
480
+ cache_dir=model_args.cache_dir,
481
+ revision=model_args.model_revision,
482
+ use_auth_token=True if model_args.use_auth_token else None,
483
+ )
484
+ tokenizer = AutoTokenizer.from_pretrained(
485
+ model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
486
+ cache_dir=model_args.cache_dir,
487
+ use_fast=model_args.use_fast_tokenizer,
488
+ revision=model_args.model_revision,
489
+ use_auth_token=True if model_args.use_auth_token else None,
490
+ )
491
+ model = AutoModelForSpeechSeq2Seq.from_pretrained(
492
+ model_args.model_name_or_path,
493
+ config=config,
494
+ cache_dir=model_args.cache_dir,
495
+ revision=model_args.model_revision,
496
+ use_auth_token=True if model_args.use_auth_token else None,
497
+ )
498
+
499
+ model.config.use_cache = model_args.use_cache
500
+ model.config.dropout = model_args.dropout
501
+ model.config.attention_dropout = model_args.attention_dropout
502
+ if training_args.gradient_checkpointing:
503
+ model.gradient_checkpointing_enable()
504
+
505
+ if model.config.decoder_start_token_id is None:
506
+ raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
507
+
508
+ # deprecated
509
+ #if model_args.freeze_feature_encoder:
510
+ # model.freeze_feature_encoder()
511
+
512
+ if model_args.freeze_encoder:
513
+ model.freeze_encoder()
514
+ model.model.encoder.gradient_checkpointing = False
515
+
516
+ if data_args.language is not None:
517
+ # We only need to set the task id when the language is specified (i.e. in a multilingual setting)
518
+ tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
519
+
520
+
521
+ # 4. Load dataset
522
+ raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
523
+
524
+ # if training_args.do_train:
525
+ # raw_datasets["train"] = load_streaming_dataset(
526
+ # data_args.dataset_name,
527
+ # data_args.dataset_config_name,
528
+ # split=data_args.train_split_name,
529
+ # use_auth_token=True if model_args.use_auth_token else None,
530
+ # )
531
+
532
+ # if training_args.do_eval:
533
+ # raw_datasets["eval"] = load_streaming_dataset(
534
+ # data_args.dataset_name,
535
+ # data_args.dataset_config_name,
536
+ # split=data_args.eval_split_name,
537
+ # use_auth_token=True if model_args.use_auth_token else None,
538
+ # )
539
+
540
+ if training_args.do_train:
541
+ raw_datasets["train"] = load_multiple_streaming_datasets(
542
+ dataset_names=data_args.dataset_name.split(","),
543
+ dataset_config_names=data_args.dataset_config_name.split(","),
544
+ splits = data_args.train_split_name.split(","),
545
+ text_column_names = data_args.text_column_name.split(","),
546
+ sampling_rate = feature_extractor.sampling_rate,
547
+ streaming=data_args.streaming,
548
+ use_auth_token=True if model_args.use_auth_token else None,
549
+ )
550
+
551
+ if training_args.do_eval:
552
+ raw_datasets["eval"] = load_multiple_streaming_datasets(
553
+ dataset_names=data_args.dataset_name.split(","),
554
+ dataset_config_names=data_args.dataset_config_name.split(","),
555
+ splits = data_args.eval_split_name.split(","),
556
+ text_column_names = data_args.text_column_name.split(","),
557
+ sampling_rate = feature_extractor.sampling_rate,
558
+ streaming=False,
559
+ use_auth_token=True if model_args.use_auth_token else None,
560
+ )
561
+
562
+ raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys())
563
+
564
+ if AUDIO_COL_NAME not in raw_datasets_features:
565
+ raise ValueError(
566
+ f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
567
+ "Make sure to set `--audio_column_name` to the correct audio column - one of "
568
+ f"{', '.join(raw_datasets_features)}."
569
+ )
570
+
571
+ if TEXT_COL_NAME not in raw_datasets_features:
572
+ raise ValueError(
573
+ f"--text_column_name {TEXT_COL_NAME} not found in dataset. "
574
+ "Make sure to set `--text_column_name` to the the respective correct text columns."
575
+ )
576
+
577
+
578
+ # 6. Resample speech dataset if necessary
579
+ #dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
580
+ #if dataset_sampling_rate != feature_extractor.sampling_rate:
581
+ # raw_datasets = raw_datasets.cast_column(
582
+ # data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
583
+ # )
584
+
585
+ # 7. Preprocessing the datasets.
586
+ # We need to read the audio files as arrays and tokenize the targets.
587
+ max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
588
+ min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
589
+ audio_column_name = AUDIO_COL_NAME
590
+ text_column_name = TEXT_COL_NAME
591
+ model_input_name = feature_extractor.model_input_names[0]
592
+ do_lower_case = data_args.do_lower_case
593
+ do_remove_punctuation = data_args.do_remove_punctuation
594
+ normalizer = BasicTextNormalizer() # 'official' text normalizer from OpenAI
595
+
596
+ if data_args.max_train_samples is not None:
597
+ raw_datasets["train"] = (
598
+ raw_datasets["train"].take(data_args.max_train_samples)
599
+ if data_args.streaming
600
+ else raw_datasets["train"].select(range(data_args.max_train_samples))
601
+ )
602
+
603
+ if data_args.max_eval_samples is not None:
604
+ raw_datasets["eval"] = (
605
+ raw_datasets["eval"].take(data_args.max_eval_samples)
606
+ if data_args.streaming
607
+ else raw_datasets["eval"].select(range(data_args.max_eval_samples))
608
+ )
609
+
610
+ def prepare_dataset(batch):
611
+ # process audio
612
+ sample = batch[audio_column_name]
613
+ inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
614
+ # process audio length
615
+ batch[model_input_name] = inputs.get(model_input_name)[0]
616
+ batch["input_length"] = len(sample["array"])
617
+
618
+ # process targets
619
+ input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
620
+ if do_remove_punctuation:
621
+ input_str = normalizer(input_str).strip()
622
+ batch["labels"] = tokenizer(input_str).input_ids
623
+ return batch
624
+
625
+ with training_args.main_process_first(desc="dataset map pre-processing"):
626
+ vectorized_datasets = raw_datasets.map(
627
+ prepare_dataset,
628
+ remove_columns=raw_datasets_features,
629
+ num_proc=training_args.dataloader_num_workers if training_args.dataloader_num_workers else 1
630
+ ).with_format("torch")
631
+
632
+ if training_args.do_train and data_args.streaming:
633
+ # manually shuffle if streaming (done by the trainer for non-streaming)
634
+ vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(
635
+ buffer_size=data_args.shuffle_buffer_size,
636
+ seed=training_args.seed,
637
+ )
638
+
639
+ # filter training data that is shorter than min_input_length or longer than
640
+ # max_input_length
641
+ def is_audio_in_length_range(length):
642
+ return min_input_length < length < max_input_length
643
+
644
+ if training_args.do_train:
645
+ vectorized_datasets["train"] = vectorized_datasets["train"].filter(
646
+ is_audio_in_length_range,
647
+ input_columns=["input_length"],
648
+ )
649
+
650
+ # 8. Load Metric
651
+ metric = evaluate.load("wer")
652
+ do_normalize_eval = data_args.do_normalize_eval
653
+
654
+ def compute_metrics(pred):
655
+ pred_ids = pred.predictions
656
+
657
+ pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
658
+
659
+ pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
660
+ # we do not want to group tokens when computing the metrics
661
+ label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
662
+
663
+ if do_normalize_eval:
664
+ pred_str = [normalizer(pred) for pred in pred_str]
665
+ label_str = [normalizer(label) for label in label_str]
666
+ # filtering step to only evaluate the samples that correspond to non-zero references:
667
+ pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0]
668
+ label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0]
669
+
670
+ wer = 100 * metric.compute(predictions=pred_str, references=label_str)
671
+
672
+ return {"wer": wer}
673
+
674
+ # 9. Create a single speech processor
675
+ if is_main_process(training_args.local_rank):
676
+ # save feature extractor, tokenizer and config
677
+ feature_extractor.save_pretrained(training_args.output_dir)
678
+ tokenizer.save_pretrained(training_args.output_dir)
679
+ config.save_pretrained(training_args.output_dir)
680
+
681
+ processor = AutoProcessor.from_pretrained(training_args.output_dir)
682
+
683
+ # 10. Define data collator
684
+ data_collator = DataCollatorSpeechSeq2SeqWithPadding(
685
+ processor=processor,
686
+ decoder_start_token_id=model.config.decoder_start_token_id,
687
+ )
688
+
689
+ # 11. Configure Trainer
690
+ # Trainer callback to reinitialise and reshuffle the streamable datasets at the beginning of each epoch
691
+ # Only required for streaming: Trainer automatically shuffles non-streaming datasets
692
+ class ShuffleCallback(TrainerCallback):
693
+ def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs):
694
+ if isinstance(train_dataloader.dataset, IterableDatasetShard):
695
+ pass # set_epoch() is handled by the Trainer
696
+ elif isinstance(train_dataloader.dataset, IterableDataset):
697
+ train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1)
698
+
699
+ # Initialize Trainer
700
+ trainer = Seq2SeqTrainer(
701
+ model=model,
702
+ args=training_args,
703
+ train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
704
+ eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
705
+ tokenizer=feature_extractor,
706
+ data_collator=data_collator,
707
+ compute_metrics=compute_metrics if training_args.predict_with_generate else None,
708
+ callbacks=[ShuffleCallback()] if data_args.streaming else None,
709
+ )
710
+
711
+ # 12. Training
712
+ if training_args.do_train:
713
+ checkpoint = None
714
+ if training_args.resume_from_checkpoint is not None:
715
+ checkpoint = training_args.resume_from_checkpoint
716
+ elif last_checkpoint is not None:
717
+ checkpoint = last_checkpoint
718
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
719
+ trainer.save_model() # Saves the feature extractor too for easy upload
720
+
721
+ metrics = train_result.metrics
722
+ if data_args.max_train_samples:
723
+ metrics["train_samples"] = data_args.max_train_samples
724
+ trainer.log_metrics("train", metrics)
725
+ trainer.save_metrics("train", metrics)
726
+ trainer.save_state()
727
+
728
+ # 13. Evaluation
729
+ results = {}
730
+ if training_args.do_eval:
731
+ logger.info("*** Evaluate ***")
732
+ metrics = trainer.evaluate(
733
+ metric_key_prefix="eval",
734
+ max_length=training_args.generation_max_length,
735
+ num_beams=training_args.generation_num_beams,
736
+ )
737
+ if data_args.max_eval_samples:
738
+ metrics["eval_samples"] = data_args.max_eval_samples
739
+
740
+ trainer.log_metrics("eval", metrics)
741
+ trainer.save_metrics("eval", metrics)
742
+
743
+ # 14. Write Training Stats
744
+ kwargs = {
745
+ "finetuned_from": model_args.model_name_or_path,
746
+ "tasks": "automatic-speech-recognition",
747
+ "tags": "whisper-event",
748
+ }
749
+ if data_args.dataset_name is not None:
750
+ kwargs["dataset_tags"] = data_args.dataset_name
751
+ if data_args.dataset_config_name is not None:
752
+ kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
753
+ else:
754
+ kwargs["dataset"] = data_args.dataset_name
755
+ if "common_voice" in data_args.dataset_name:
756
+ kwargs["language"] = data_args.dataset_config_name[:2]
757
+ if model_args.model_index_name is not None:
758
+ kwargs["model_name"] = model_args.model_index_name
759
+
760
+ if training_args.push_to_hub:
761
+ trainer.push_to_hub(**kwargs)
762
+ else:
763
+ trainer.create_model_card(**kwargs)
764
+
765
+ return results
766
+
767
+
768
+ if __name__ == "__main__":
769
+ main()
runs/Dec18_19-31-26_150-136-92-72/1671392859.803059/events.out.tfevents.1671392859.150-136-92-72.3522376.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0ceb80046b312d6d4f6a912c505ee60a27e8c1423e7b51bdf1f3ed5bfaced17
3
+ size 5863
runs/Dec18_19-31-26_150-136-92-72/events.out.tfevents.1671392859.150-136-92-72.3522376.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67137c6c1fb87eb68b3419176c2ead80507b024b4ae641ccc4ee4dcede25638e
3
+ size 70581
runs/Dec18_19-31-26_150-136-92-72/events.out.tfevents.1671450244.150-136-92-72.3522376.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f8e1f98b6a01ff31c7996e218eaf4b7e895a3d912df49f0b37a427bc2e22d56
3
+ size 358
special_tokens_map.json ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|endoftext|>",
4
+ "<|startoftranscript|>",
5
+ "<|en|>",
6
+ "<|zh|>",
7
+ "<|de|>",
8
+ "<|es|>",
9
+ "<|ru|>",
10
+ "<|ko|>",
11
+ "<|fr|>",
12
+ "<|ja|>",
13
+ "<|pt|>",
14
+ "<|tr|>",
15
+ "<|pl|>",
16
+ "<|ca|>",
17
+ "<|nl|>",
18
+ "<|ar|>",
19
+ "<|sv|>",
20
+ "<|it|>",
21
+ "<|id|>",
22
+ "<|hi|>",
23
+ "<|fi|>",
24
+ "<|vi|>",
25
+ "<|iw|>",
26
+ "<|uk|>",
27
+ "<|el|>",
28
+ "<|ms|>",
29
+ "<|cs|>",
30
+ "<|ro|>",
31
+ "<|da|>",
32
+ "<|hu|>",
33
+ "<|ta|>",
34
+ "<|no|>",
35
+ "<|th|>",
36
+ "<|ur|>",
37
+ "<|hr|>",
38
+ "<|bg|>",
39
+ "<|lt|>",
40
+ "<|la|>",
41
+ "<|mi|>",
42
+ "<|ml|>",
43
+ "<|cy|>",
44
+ "<|sk|>",
45
+ "<|te|>",
46
+ "<|fa|>",
47
+ "<|lv|>",
48
+ "<|bn|>",
49
+ "<|sr|>",
50
+ "<|az|>",
51
+ "<|sl|>",
52
+ "<|kn|>",
53
+ "<|et|>",
54
+ "<|mk|>",
55
+ "<|br|>",
56
+ "<|eu|>",
57
+ "<|is|>",
58
+ "<|hy|>",
59
+ "<|ne|>",
60
+ "<|mn|>",
61
+ "<|bs|>",
62
+ "<|kk|>",
63
+ "<|sq|>",
64
+ "<|sw|>",
65
+ "<|gl|>",
66
+ "<|mr|>",
67
+ "<|pa|>",
68
+ "<|si|>",
69
+ "<|km|>",
70
+ "<|sn|>",
71
+ "<|yo|>",
72
+ "<|so|>",
73
+ "<|af|>",
74
+ "<|oc|>",
75
+ "<|ka|>",
76
+ "<|be|>",
77
+ "<|tg|>",
78
+ "<|sd|>",
79
+ "<|gu|>",
80
+ "<|am|>",
81
+ "<|yi|>",
82
+ "<|lo|>",
83
+ "<|uz|>",
84
+ "<|fo|>",
85
+ "<|ht|>",
86
+ "<|ps|>",
87
+ "<|tk|>",
88
+ "<|nn|>",
89
+ "<|mt|>",
90
+ "<|sa|>",
91
+ "<|lb|>",
92
+ "<|my|>",
93
+ "<|bo|>",
94
+ "<|tl|>",
95
+ "<|mg|>",
96
+ "<|as|>",
97
+ "<|tt|>",
98
+ "<|haw|>",
99
+ "<|ln|>",
100
+ "<|ha|>",
101
+ "<|ba|>",
102
+ "<|jw|>",
103
+ "<|su|>",
104
+ "<|translate|>",
105
+ "<|transcribe|>",
106
+ "<|startoflm|>",
107
+ "<|startofprev|>",
108
+ "<|nocaptions|>",
109
+ "<|notimestamps|>"
110
+ ],
111
+ "bos_token": {
112
+ "content": "<|endoftext|>",
113
+ "lstrip": false,
114
+ "normalized": true,
115
+ "rstrip": false,
116
+ "single_word": false
117
+ },
118
+ "eos_token": {
119
+ "content": "<|endoftext|>",
120
+ "lstrip": false,
121
+ "normalized": true,
122
+ "rstrip": false,
123
+ "single_word": false
124
+ },
125
+ "pad_token": "<|endoftext|>",
126
+ "unk_token": {
127
+ "content": "",
128
+ "lstrip": false,
129
+ "normalized": true,
130
+ "rstrip": false,
131
+ "single_word": false
132
+ }
133
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "eos_token": {
13
+ "__type": "AddedToken",
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "errors": "replace",
21
+ "model_max_length": 1024,
22
+ "name_or_path": "openai/whisper-medium",
23
+ "pad_token": null,
24
+ "processor_class": "WhisperProcessor",
25
+ "return_attention_mask": false,
26
+ "special_tokens_map_file": null,
27
+ "tokenizer_class": "WhisperTokenizer",
28
+ "unk_token": {
29
+ "__type": "AddedToken",
30
+ "content": "",
31
+ "lstrip": false,
32
+ "normalized": true,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ }
36
+ }
train_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 31.45,
3
+ "train_loss": 0.0230206538159051,
4
+ "train_runtime": 56008.0991,
5
+ "train_samples_per_second": 5.713,
6
+ "train_steps_per_second": 0.179
7
+ }
trainer_state.json ADDED
@@ -0,0 +1,2515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 13.708574434508153,
3
+ "best_model_checkpoint": "./checkpoint-10000",
4
+ "epoch": 31.446540880503143,
5
+ "global_step": 10000,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.08,
12
+ "learning_rate": 3.6799999999999996e-07,
13
+ "loss": 1.2045,
14
+ "step": 25
15
+ },
16
+ {
17
+ "epoch": 0.16,
18
+ "learning_rate": 7.68e-07,
19
+ "loss": 0.997,
20
+ "step": 50
21
+ },
22
+ {
23
+ "epoch": 0.24,
24
+ "learning_rate": 1.1679999999999999e-06,
25
+ "loss": 0.6901,
26
+ "step": 75
27
+ },
28
+ {
29
+ "epoch": 0.31,
30
+ "learning_rate": 1.568e-06,
31
+ "loss": 0.4398,
32
+ "step": 100
33
+ },
34
+ {
35
+ "epoch": 0.39,
36
+ "learning_rate": 1.968e-06,
37
+ "loss": 0.3942,
38
+ "step": 125
39
+ },
40
+ {
41
+ "epoch": 0.47,
42
+ "learning_rate": 2.3679999999999996e-06,
43
+ "loss": 0.3263,
44
+ "step": 150
45
+ },
46
+ {
47
+ "epoch": 0.55,
48
+ "learning_rate": 2.7679999999999996e-06,
49
+ "loss": 0.3056,
50
+ "step": 175
51
+ },
52
+ {
53
+ "epoch": 0.63,
54
+ "learning_rate": 3.168e-06,
55
+ "loss": 0.2628,
56
+ "step": 200
57
+ },
58
+ {
59
+ "epoch": 0.71,
60
+ "learning_rate": 3.568e-06,
61
+ "loss": 0.2526,
62
+ "step": 225
63
+ },
64
+ {
65
+ "epoch": 0.79,
66
+ "learning_rate": 3.968e-06,
67
+ "loss": 0.257,
68
+ "step": 250
69
+ },
70
+ {
71
+ "epoch": 0.86,
72
+ "learning_rate": 4.368e-06,
73
+ "loss": 0.2296,
74
+ "step": 275
75
+ },
76
+ {
77
+ "epoch": 0.94,
78
+ "learning_rate": 4.768e-06,
79
+ "loss": 0.231,
80
+ "step": 300
81
+ },
82
+ {
83
+ "epoch": 1.02,
84
+ "learning_rate": 5.168e-06,
85
+ "loss": 0.194,
86
+ "step": 325
87
+ },
88
+ {
89
+ "epoch": 1.1,
90
+ "learning_rate": 5.567999999999999e-06,
91
+ "loss": 0.1423,
92
+ "step": 350
93
+ },
94
+ {
95
+ "epoch": 1.18,
96
+ "learning_rate": 5.9679999999999994e-06,
97
+ "loss": 0.1473,
98
+ "step": 375
99
+ },
100
+ {
101
+ "epoch": 1.26,
102
+ "learning_rate": 6.368e-06,
103
+ "loss": 0.1454,
104
+ "step": 400
105
+ },
106
+ {
107
+ "epoch": 1.34,
108
+ "learning_rate": 6.767999999999999e-06,
109
+ "loss": 0.143,
110
+ "step": 425
111
+ },
112
+ {
113
+ "epoch": 1.42,
114
+ "learning_rate": 7.168e-06,
115
+ "loss": 0.141,
116
+ "step": 450
117
+ },
118
+ {
119
+ "epoch": 1.49,
120
+ "learning_rate": 7.567999999999999e-06,
121
+ "loss": 0.1471,
122
+ "step": 475
123
+ },
124
+ {
125
+ "epoch": 1.57,
126
+ "learning_rate": 7.967999999999999e-06,
127
+ "loss": 0.1336,
128
+ "step": 500
129
+ },
130
+ {
131
+ "epoch": 1.65,
132
+ "learning_rate": 7.980631578947368e-06,
133
+ "loss": 0.1264,
134
+ "step": 525
135
+ },
136
+ {
137
+ "epoch": 1.73,
138
+ "learning_rate": 7.95957894736842e-06,
139
+ "loss": 0.1324,
140
+ "step": 550
141
+ },
142
+ {
143
+ "epoch": 1.81,
144
+ "learning_rate": 7.938526315789473e-06,
145
+ "loss": 0.1179,
146
+ "step": 575
147
+ },
148
+ {
149
+ "epoch": 1.89,
150
+ "learning_rate": 7.917473684210526e-06,
151
+ "loss": 0.1243,
152
+ "step": 600
153
+ },
154
+ {
155
+ "epoch": 1.97,
156
+ "learning_rate": 7.896421052631578e-06,
157
+ "loss": 0.1209,
158
+ "step": 625
159
+ },
160
+ {
161
+ "epoch": 2.04,
162
+ "learning_rate": 7.875368421052631e-06,
163
+ "loss": 0.0874,
164
+ "step": 650
165
+ },
166
+ {
167
+ "epoch": 2.12,
168
+ "learning_rate": 7.854315789473684e-06,
169
+ "loss": 0.0612,
170
+ "step": 675
171
+ },
172
+ {
173
+ "epoch": 2.2,
174
+ "learning_rate": 7.833263157894736e-06,
175
+ "loss": 0.0569,
176
+ "step": 700
177
+ },
178
+ {
179
+ "epoch": 2.28,
180
+ "learning_rate": 7.812210526315789e-06,
181
+ "loss": 0.0566,
182
+ "step": 725
183
+ },
184
+ {
185
+ "epoch": 2.36,
186
+ "learning_rate": 7.791157894736842e-06,
187
+ "loss": 0.0522,
188
+ "step": 750
189
+ },
190
+ {
191
+ "epoch": 2.44,
192
+ "learning_rate": 7.770105263157894e-06,
193
+ "loss": 0.0558,
194
+ "step": 775
195
+ },
196
+ {
197
+ "epoch": 2.52,
198
+ "learning_rate": 7.749052631578947e-06,
199
+ "loss": 0.0547,
200
+ "step": 800
201
+ },
202
+ {
203
+ "epoch": 2.59,
204
+ "learning_rate": 7.728e-06,
205
+ "loss": 0.0568,
206
+ "step": 825
207
+ },
208
+ {
209
+ "epoch": 2.67,
210
+ "learning_rate": 7.706947368421052e-06,
211
+ "loss": 0.0569,
212
+ "step": 850
213
+ },
214
+ {
215
+ "epoch": 2.75,
216
+ "learning_rate": 7.685894736842105e-06,
217
+ "loss": 0.0573,
218
+ "step": 875
219
+ },
220
+ {
221
+ "epoch": 2.83,
222
+ "learning_rate": 7.664842105263157e-06,
223
+ "loss": 0.0538,
224
+ "step": 900
225
+ },
226
+ {
227
+ "epoch": 2.91,
228
+ "learning_rate": 7.64378947368421e-06,
229
+ "loss": 0.054,
230
+ "step": 925
231
+ },
232
+ {
233
+ "epoch": 2.99,
234
+ "learning_rate": 7.622736842105263e-06,
235
+ "loss": 0.0553,
236
+ "step": 950
237
+ },
238
+ {
239
+ "epoch": 3.07,
240
+ "learning_rate": 7.6016842105263155e-06,
241
+ "loss": 0.0316,
242
+ "step": 975
243
+ },
244
+ {
245
+ "epoch": 3.14,
246
+ "learning_rate": 7.580631578947368e-06,
247
+ "loss": 0.0265,
248
+ "step": 1000
249
+ },
250
+ {
251
+ "epoch": 3.14,
252
+ "eval_loss": 0.3689558207988739,
253
+ "eval_runtime": 1285.0181,
254
+ "eval_samples_per_second": 1.66,
255
+ "eval_steps_per_second": 0.208,
256
+ "eval_wer": 14.760652288269332,
257
+ "step": 1000
258
+ },
259
+ {
260
+ "epoch": 3.22,
261
+ "learning_rate": 7.559578947368421e-06,
262
+ "loss": 0.0247,
263
+ "step": 1025
264
+ },
265
+ {
266
+ "epoch": 3.3,
267
+ "learning_rate": 7.5385263157894734e-06,
268
+ "loss": 0.0285,
269
+ "step": 1050
270
+ },
271
+ {
272
+ "epoch": 3.38,
273
+ "learning_rate": 7.517473684210526e-06,
274
+ "loss": 0.0266,
275
+ "step": 1075
276
+ },
277
+ {
278
+ "epoch": 3.46,
279
+ "learning_rate": 7.496421052631579e-06,
280
+ "loss": 0.0242,
281
+ "step": 1100
282
+ },
283
+ {
284
+ "epoch": 3.54,
285
+ "learning_rate": 7.475368421052631e-06,
286
+ "loss": 0.0286,
287
+ "step": 1125
288
+ },
289
+ {
290
+ "epoch": 3.62,
291
+ "learning_rate": 7.454315789473684e-06,
292
+ "loss": 0.0254,
293
+ "step": 1150
294
+ },
295
+ {
296
+ "epoch": 3.69,
297
+ "learning_rate": 7.433263157894736e-06,
298
+ "loss": 0.0303,
299
+ "step": 1175
300
+ },
301
+ {
302
+ "epoch": 3.77,
303
+ "learning_rate": 7.4122105263157885e-06,
304
+ "loss": 0.0281,
305
+ "step": 1200
306
+ },
307
+ {
308
+ "epoch": 3.85,
309
+ "learning_rate": 7.391157894736841e-06,
310
+ "loss": 0.0267,
311
+ "step": 1225
312
+ },
313
+ {
314
+ "epoch": 3.93,
315
+ "learning_rate": 7.370105263157895e-06,
316
+ "loss": 0.0272,
317
+ "step": 1250
318
+ },
319
+ {
320
+ "epoch": 4.01,
321
+ "learning_rate": 7.349052631578947e-06,
322
+ "loss": 0.0233,
323
+ "step": 1275
324
+ },
325
+ {
326
+ "epoch": 4.09,
327
+ "learning_rate": 7.328e-06,
328
+ "loss": 0.0133,
329
+ "step": 1300
330
+ },
331
+ {
332
+ "epoch": 4.17,
333
+ "learning_rate": 7.306947368421053e-06,
334
+ "loss": 0.0148,
335
+ "step": 1325
336
+ },
337
+ {
338
+ "epoch": 4.25,
339
+ "learning_rate": 7.285894736842105e-06,
340
+ "loss": 0.0153,
341
+ "step": 1350
342
+ },
343
+ {
344
+ "epoch": 4.32,
345
+ "learning_rate": 7.264842105263158e-06,
346
+ "loss": 0.016,
347
+ "step": 1375
348
+ },
349
+ {
350
+ "epoch": 4.4,
351
+ "learning_rate": 7.243789473684211e-06,
352
+ "loss": 0.0146,
353
+ "step": 1400
354
+ },
355
+ {
356
+ "epoch": 4.48,
357
+ "learning_rate": 7.222736842105262e-06,
358
+ "loss": 0.0147,
359
+ "step": 1425
360
+ },
361
+ {
362
+ "epoch": 4.56,
363
+ "learning_rate": 7.201684210526315e-06,
364
+ "loss": 0.0151,
365
+ "step": 1450
366
+ },
367
+ {
368
+ "epoch": 4.64,
369
+ "learning_rate": 7.180631578947368e-06,
370
+ "loss": 0.0135,
371
+ "step": 1475
372
+ },
373
+ {
374
+ "epoch": 4.72,
375
+ "learning_rate": 7.15957894736842e-06,
376
+ "loss": 0.0159,
377
+ "step": 1500
378
+ },
379
+ {
380
+ "epoch": 4.8,
381
+ "learning_rate": 7.138526315789473e-06,
382
+ "loss": 0.0166,
383
+ "step": 1525
384
+ },
385
+ {
386
+ "epoch": 4.87,
387
+ "learning_rate": 7.117473684210526e-06,
388
+ "loss": 0.0118,
389
+ "step": 1550
390
+ },
391
+ {
392
+ "epoch": 4.95,
393
+ "learning_rate": 7.096421052631578e-06,
394
+ "loss": 0.015,
395
+ "step": 1575
396
+ },
397
+ {
398
+ "epoch": 5.03,
399
+ "learning_rate": 7.075368421052632e-06,
400
+ "loss": 0.0121,
401
+ "step": 1600
402
+ },
403
+ {
404
+ "epoch": 5.11,
405
+ "learning_rate": 7.0543157894736845e-06,
406
+ "loss": 0.0098,
407
+ "step": 1625
408
+ },
409
+ {
410
+ "epoch": 5.19,
411
+ "learning_rate": 7.033263157894737e-06,
412
+ "loss": 0.0083,
413
+ "step": 1650
414
+ },
415
+ {
416
+ "epoch": 5.27,
417
+ "learning_rate": 7.012210526315789e-06,
418
+ "loss": 0.0092,
419
+ "step": 1675
420
+ },
421
+ {
422
+ "epoch": 5.35,
423
+ "learning_rate": 6.991157894736842e-06,
424
+ "loss": 0.0076,
425
+ "step": 1700
426
+ },
427
+ {
428
+ "epoch": 5.42,
429
+ "learning_rate": 6.970105263157894e-06,
430
+ "loss": 0.009,
431
+ "step": 1725
432
+ },
433
+ {
434
+ "epoch": 5.5,
435
+ "learning_rate": 6.949052631578947e-06,
436
+ "loss": 0.0098,
437
+ "step": 1750
438
+ },
439
+ {
440
+ "epoch": 5.58,
441
+ "learning_rate": 6.9279999999999996e-06,
442
+ "loss": 0.0069,
443
+ "step": 1775
444
+ },
445
+ {
446
+ "epoch": 5.66,
447
+ "learning_rate": 6.906947368421052e-06,
448
+ "loss": 0.0095,
449
+ "step": 1800
450
+ },
451
+ {
452
+ "epoch": 5.74,
453
+ "learning_rate": 6.885894736842105e-06,
454
+ "loss": 0.009,
455
+ "step": 1825
456
+ },
457
+ {
458
+ "epoch": 5.82,
459
+ "learning_rate": 6.8648421052631575e-06,
460
+ "loss": 0.0103,
461
+ "step": 1850
462
+ },
463
+ {
464
+ "epoch": 5.9,
465
+ "learning_rate": 6.84378947368421e-06,
466
+ "loss": 0.0106,
467
+ "step": 1875
468
+ },
469
+ {
470
+ "epoch": 5.97,
471
+ "learning_rate": 6.822736842105263e-06,
472
+ "loss": 0.0114,
473
+ "step": 1900
474
+ },
475
+ {
476
+ "epoch": 6.05,
477
+ "learning_rate": 6.8016842105263155e-06,
478
+ "loss": 0.0089,
479
+ "step": 1925
480
+ },
481
+ {
482
+ "epoch": 6.13,
483
+ "learning_rate": 6.780631578947368e-06,
484
+ "loss": 0.0055,
485
+ "step": 1950
486
+ },
487
+ {
488
+ "epoch": 6.21,
489
+ "learning_rate": 6.759578947368421e-06,
490
+ "loss": 0.0051,
491
+ "step": 1975
492
+ },
493
+ {
494
+ "epoch": 6.29,
495
+ "learning_rate": 6.7385263157894735e-06,
496
+ "loss": 0.0063,
497
+ "step": 2000
498
+ },
499
+ {
500
+ "epoch": 6.29,
501
+ "eval_loss": 0.4341892600059509,
502
+ "eval_runtime": 1246.3959,
503
+ "eval_samples_per_second": 1.711,
504
+ "eval_steps_per_second": 0.214,
505
+ "eval_wer": 14.092582851130983,
506
+ "step": 2000
507
+ },
508
+ {
509
+ "epoch": 6.37,
510
+ "learning_rate": 6.717473684210526e-06,
511
+ "loss": 0.006,
512
+ "step": 2025
513
+ },
514
+ {
515
+ "epoch": 6.45,
516
+ "learning_rate": 6.696421052631579e-06,
517
+ "loss": 0.005,
518
+ "step": 2050
519
+ },
520
+ {
521
+ "epoch": 6.53,
522
+ "learning_rate": 6.6753684210526314e-06,
523
+ "loss": 0.0047,
524
+ "step": 2075
525
+ },
526
+ {
527
+ "epoch": 6.6,
528
+ "learning_rate": 6.654315789473684e-06,
529
+ "loss": 0.0066,
530
+ "step": 2100
531
+ },
532
+ {
533
+ "epoch": 6.68,
534
+ "learning_rate": 6.633263157894737e-06,
535
+ "loss": 0.0057,
536
+ "step": 2125
537
+ },
538
+ {
539
+ "epoch": 6.76,
540
+ "learning_rate": 6.612210526315789e-06,
541
+ "loss": 0.0063,
542
+ "step": 2150
543
+ },
544
+ {
545
+ "epoch": 6.84,
546
+ "learning_rate": 6.591157894736841e-06,
547
+ "loss": 0.0067,
548
+ "step": 2175
549
+ },
550
+ {
551
+ "epoch": 6.92,
552
+ "learning_rate": 6.570105263157894e-06,
553
+ "loss": 0.006,
554
+ "step": 2200
555
+ },
556
+ {
557
+ "epoch": 7.0,
558
+ "learning_rate": 6.5490526315789465e-06,
559
+ "loss": 0.0066,
560
+ "step": 2225
561
+ },
562
+ {
563
+ "epoch": 7.08,
564
+ "learning_rate": 6.527999999999999e-06,
565
+ "loss": 0.0056,
566
+ "step": 2250
567
+ },
568
+ {
569
+ "epoch": 7.15,
570
+ "learning_rate": 6.506947368421053e-06,
571
+ "loss": 0.0036,
572
+ "step": 2275
573
+ },
574
+ {
575
+ "epoch": 7.23,
576
+ "learning_rate": 6.485894736842105e-06,
577
+ "loss": 0.0034,
578
+ "step": 2300
579
+ },
580
+ {
581
+ "epoch": 7.31,
582
+ "learning_rate": 6.464842105263158e-06,
583
+ "loss": 0.0027,
584
+ "step": 2325
585
+ },
586
+ {
587
+ "epoch": 7.39,
588
+ "learning_rate": 6.443789473684211e-06,
589
+ "loss": 0.0044,
590
+ "step": 2350
591
+ },
592
+ {
593
+ "epoch": 7.47,
594
+ "learning_rate": 6.422736842105263e-06,
595
+ "loss": 0.005,
596
+ "step": 2375
597
+ },
598
+ {
599
+ "epoch": 7.55,
600
+ "learning_rate": 6.401684210526316e-06,
601
+ "loss": 0.0034,
602
+ "step": 2400
603
+ },
604
+ {
605
+ "epoch": 7.63,
606
+ "learning_rate": 6.380631578947368e-06,
607
+ "loss": 0.0028,
608
+ "step": 2425
609
+ },
610
+ {
611
+ "epoch": 7.7,
612
+ "learning_rate": 6.35957894736842e-06,
613
+ "loss": 0.0037,
614
+ "step": 2450
615
+ },
616
+ {
617
+ "epoch": 7.78,
618
+ "learning_rate": 6.338526315789473e-06,
619
+ "loss": 0.0045,
620
+ "step": 2475
621
+ },
622
+ {
623
+ "epoch": 7.86,
624
+ "learning_rate": 6.317473684210526e-06,
625
+ "loss": 0.005,
626
+ "step": 2500
627
+ },
628
+ {
629
+ "epoch": 7.94,
630
+ "learning_rate": 6.296421052631578e-06,
631
+ "loss": 0.0063,
632
+ "step": 2525
633
+ },
634
+ {
635
+ "epoch": 8.02,
636
+ "learning_rate": 6.275368421052631e-06,
637
+ "loss": 0.0051,
638
+ "step": 2550
639
+ },
640
+ {
641
+ "epoch": 8.1,
642
+ "learning_rate": 6.254315789473684e-06,
643
+ "loss": 0.0038,
644
+ "step": 2575
645
+ },
646
+ {
647
+ "epoch": 8.18,
648
+ "learning_rate": 6.233263157894737e-06,
649
+ "loss": 0.0035,
650
+ "step": 2600
651
+ },
652
+ {
653
+ "epoch": 8.25,
654
+ "learning_rate": 6.21221052631579e-06,
655
+ "loss": 0.0036,
656
+ "step": 2625
657
+ },
658
+ {
659
+ "epoch": 8.33,
660
+ "learning_rate": 6.1911578947368425e-06,
661
+ "loss": 0.0034,
662
+ "step": 2650
663
+ },
664
+ {
665
+ "epoch": 8.41,
666
+ "learning_rate": 6.170105263157894e-06,
667
+ "loss": 0.0028,
668
+ "step": 2675
669
+ },
670
+ {
671
+ "epoch": 8.49,
672
+ "learning_rate": 6.149052631578947e-06,
673
+ "loss": 0.0028,
674
+ "step": 2700
675
+ },
676
+ {
677
+ "epoch": 8.57,
678
+ "learning_rate": 6.128e-06,
679
+ "loss": 0.0024,
680
+ "step": 2725
681
+ },
682
+ {
683
+ "epoch": 8.65,
684
+ "learning_rate": 6.106947368421052e-06,
685
+ "loss": 0.0029,
686
+ "step": 2750
687
+ },
688
+ {
689
+ "epoch": 8.73,
690
+ "learning_rate": 6.085894736842105e-06,
691
+ "loss": 0.0032,
692
+ "step": 2775
693
+ },
694
+ {
695
+ "epoch": 8.81,
696
+ "learning_rate": 6.0648421052631576e-06,
697
+ "loss": 0.0031,
698
+ "step": 2800
699
+ },
700
+ {
701
+ "epoch": 8.88,
702
+ "learning_rate": 6.04378947368421e-06,
703
+ "loss": 0.0023,
704
+ "step": 2825
705
+ },
706
+ {
707
+ "epoch": 8.96,
708
+ "learning_rate": 6.022736842105263e-06,
709
+ "loss": 0.0028,
710
+ "step": 2850
711
+ },
712
+ {
713
+ "epoch": 9.04,
714
+ "learning_rate": 6.0016842105263155e-06,
715
+ "loss": 0.0032,
716
+ "step": 2875
717
+ },
718
+ {
719
+ "epoch": 9.12,
720
+ "learning_rate": 5.980631578947368e-06,
721
+ "loss": 0.0037,
722
+ "step": 2900
723
+ },
724
+ {
725
+ "epoch": 9.2,
726
+ "learning_rate": 5.95957894736842e-06,
727
+ "loss": 0.0021,
728
+ "step": 2925
729
+ },
730
+ {
731
+ "epoch": 9.28,
732
+ "learning_rate": 5.9385263157894735e-06,
733
+ "loss": 0.0021,
734
+ "step": 2950
735
+ },
736
+ {
737
+ "epoch": 9.36,
738
+ "learning_rate": 5.917473684210526e-06,
739
+ "loss": 0.0022,
740
+ "step": 2975
741
+ },
742
+ {
743
+ "epoch": 9.43,
744
+ "learning_rate": 5.896421052631579e-06,
745
+ "loss": 0.0016,
746
+ "step": 3000
747
+ },
748
+ {
749
+ "epoch": 9.43,
750
+ "eval_loss": 0.48472946882247925,
751
+ "eval_runtime": 1245.478,
752
+ "eval_samples_per_second": 1.713,
753
+ "eval_steps_per_second": 0.214,
754
+ "eval_wer": 14.360862703840086,
755
+ "step": 3000
756
+ },
757
+ {
758
+ "epoch": 9.51,
759
+ "learning_rate": 5.8753684210526315e-06,
760
+ "loss": 0.002,
761
+ "step": 3025
762
+ },
763
+ {
764
+ "epoch": 9.59,
765
+ "learning_rate": 5.854315789473684e-06,
766
+ "loss": 0.0027,
767
+ "step": 3050
768
+ },
769
+ {
770
+ "epoch": 9.67,
771
+ "learning_rate": 5.833263157894737e-06,
772
+ "loss": 0.0027,
773
+ "step": 3075
774
+ },
775
+ {
776
+ "epoch": 9.75,
777
+ "learning_rate": 5.8122105263157894e-06,
778
+ "loss": 0.0038,
779
+ "step": 3100
780
+ },
781
+ {
782
+ "epoch": 9.83,
783
+ "learning_rate": 5.791157894736842e-06,
784
+ "loss": 0.0027,
785
+ "step": 3125
786
+ },
787
+ {
788
+ "epoch": 9.91,
789
+ "learning_rate": 5.770105263157895e-06,
790
+ "loss": 0.0021,
791
+ "step": 3150
792
+ },
793
+ {
794
+ "epoch": 9.98,
795
+ "learning_rate": 5.7490526315789465e-06,
796
+ "loss": 0.0024,
797
+ "step": 3175
798
+ },
799
+ {
800
+ "epoch": 10.06,
801
+ "learning_rate": 5.727999999999999e-06,
802
+ "loss": 0.0017,
803
+ "step": 3200
804
+ },
805
+ {
806
+ "epoch": 10.14,
807
+ "learning_rate": 5.706947368421052e-06,
808
+ "loss": 0.0024,
809
+ "step": 3225
810
+ },
811
+ {
812
+ "epoch": 10.22,
813
+ "learning_rate": 5.6858947368421045e-06,
814
+ "loss": 0.0023,
815
+ "step": 3250
816
+ },
817
+ {
818
+ "epoch": 10.3,
819
+ "learning_rate": 5.664842105263157e-06,
820
+ "loss": 0.0018,
821
+ "step": 3275
822
+ },
823
+ {
824
+ "epoch": 10.38,
825
+ "learning_rate": 5.643789473684211e-06,
826
+ "loss": 0.0019,
827
+ "step": 3300
828
+ },
829
+ {
830
+ "epoch": 10.46,
831
+ "learning_rate": 5.622736842105263e-06,
832
+ "loss": 0.0027,
833
+ "step": 3325
834
+ },
835
+ {
836
+ "epoch": 10.53,
837
+ "learning_rate": 5.601684210526316e-06,
838
+ "loss": 0.0027,
839
+ "step": 3350
840
+ },
841
+ {
842
+ "epoch": 10.61,
843
+ "learning_rate": 5.580631578947369e-06,
844
+ "loss": 0.0022,
845
+ "step": 3375
846
+ },
847
+ {
848
+ "epoch": 10.69,
849
+ "learning_rate": 5.559578947368421e-06,
850
+ "loss": 0.0019,
851
+ "step": 3400
852
+ },
853
+ {
854
+ "epoch": 10.77,
855
+ "learning_rate": 5.538526315789473e-06,
856
+ "loss": 0.0018,
857
+ "step": 3425
858
+ },
859
+ {
860
+ "epoch": 10.85,
861
+ "learning_rate": 5.517473684210526e-06,
862
+ "loss": 0.002,
863
+ "step": 3450
864
+ },
865
+ {
866
+ "epoch": 10.93,
867
+ "learning_rate": 5.496421052631578e-06,
868
+ "loss": 0.0034,
869
+ "step": 3475
870
+ },
871
+ {
872
+ "epoch": 11.01,
873
+ "learning_rate": 5.475368421052631e-06,
874
+ "loss": 0.0017,
875
+ "step": 3500
876
+ },
877
+ {
878
+ "epoch": 11.08,
879
+ "learning_rate": 5.454315789473684e-06,
880
+ "loss": 0.002,
881
+ "step": 3525
882
+ },
883
+ {
884
+ "epoch": 11.16,
885
+ "learning_rate": 5.433263157894736e-06,
886
+ "loss": 0.0019,
887
+ "step": 3550
888
+ },
889
+ {
890
+ "epoch": 11.24,
891
+ "learning_rate": 5.412210526315789e-06,
892
+ "loss": 0.0022,
893
+ "step": 3575
894
+ },
895
+ {
896
+ "epoch": 11.32,
897
+ "learning_rate": 5.391157894736842e-06,
898
+ "loss": 0.0015,
899
+ "step": 3600
900
+ },
901
+ {
902
+ "epoch": 11.4,
903
+ "learning_rate": 5.370105263157895e-06,
904
+ "loss": 0.0014,
905
+ "step": 3625
906
+ },
907
+ {
908
+ "epoch": 11.48,
909
+ "learning_rate": 5.349052631578948e-06,
910
+ "loss": 0.0017,
911
+ "step": 3650
912
+ },
913
+ {
914
+ "epoch": 11.56,
915
+ "learning_rate": 5.328e-06,
916
+ "loss": 0.0015,
917
+ "step": 3675
918
+ },
919
+ {
920
+ "epoch": 11.64,
921
+ "learning_rate": 5.306947368421052e-06,
922
+ "loss": 0.0016,
923
+ "step": 3700
924
+ },
925
+ {
926
+ "epoch": 11.71,
927
+ "learning_rate": 5.285894736842105e-06,
928
+ "loss": 0.0026,
929
+ "step": 3725
930
+ },
931
+ {
932
+ "epoch": 11.79,
933
+ "learning_rate": 5.264842105263158e-06,
934
+ "loss": 0.0026,
935
+ "step": 3750
936
+ },
937
+ {
938
+ "epoch": 11.87,
939
+ "learning_rate": 5.24378947368421e-06,
940
+ "loss": 0.0026,
941
+ "step": 3775
942
+ },
943
+ {
944
+ "epoch": 11.95,
945
+ "learning_rate": 5.222736842105263e-06,
946
+ "loss": 0.0019,
947
+ "step": 3800
948
+ },
949
+ {
950
+ "epoch": 12.03,
951
+ "learning_rate": 5.2016842105263156e-06,
952
+ "loss": 0.0019,
953
+ "step": 3825
954
+ },
955
+ {
956
+ "epoch": 12.11,
957
+ "learning_rate": 5.180631578947368e-06,
958
+ "loss": 0.0018,
959
+ "step": 3850
960
+ },
961
+ {
962
+ "epoch": 12.19,
963
+ "learning_rate": 5.159578947368421e-06,
964
+ "loss": 0.0021,
965
+ "step": 3875
966
+ },
967
+ {
968
+ "epoch": 12.26,
969
+ "learning_rate": 5.1385263157894735e-06,
970
+ "loss": 0.0015,
971
+ "step": 3900
972
+ },
973
+ {
974
+ "epoch": 12.34,
975
+ "learning_rate": 5.117473684210525e-06,
976
+ "loss": 0.0019,
977
+ "step": 3925
978
+ },
979
+ {
980
+ "epoch": 12.42,
981
+ "learning_rate": 5.096421052631578e-06,
982
+ "loss": 0.0026,
983
+ "step": 3950
984
+ },
985
+ {
986
+ "epoch": 12.5,
987
+ "learning_rate": 5.0753684210526315e-06,
988
+ "loss": 0.0015,
989
+ "step": 3975
990
+ },
991
+ {
992
+ "epoch": 12.58,
993
+ "learning_rate": 5.054315789473684e-06,
994
+ "loss": 0.002,
995
+ "step": 4000
996
+ },
997
+ {
998
+ "epoch": 12.58,
999
+ "eval_loss": 0.4919339120388031,
1000
+ "eval_runtime": 1243.533,
1001
+ "eval_samples_per_second": 1.715,
1002
+ "eval_steps_per_second": 0.215,
1003
+ "eval_wer": 14.171488690163073,
1004
+ "step": 4000
1005
+ },
1006
+ {
1007
+ "epoch": 12.66,
1008
+ "learning_rate": 5.033263157894737e-06,
1009
+ "loss": 0.0017,
1010
+ "step": 4025
1011
+ },
1012
+ {
1013
+ "epoch": 12.74,
1014
+ "learning_rate": 5.0122105263157895e-06,
1015
+ "loss": 0.0025,
1016
+ "step": 4050
1017
+ },
1018
+ {
1019
+ "epoch": 12.81,
1020
+ "learning_rate": 4.991157894736842e-06,
1021
+ "loss": 0.0025,
1022
+ "step": 4075
1023
+ },
1024
+ {
1025
+ "epoch": 12.89,
1026
+ "learning_rate": 4.970105263157895e-06,
1027
+ "loss": 0.0016,
1028
+ "step": 4100
1029
+ },
1030
+ {
1031
+ "epoch": 12.97,
1032
+ "learning_rate": 4.9490526315789474e-06,
1033
+ "loss": 0.0027,
1034
+ "step": 4125
1035
+ },
1036
+ {
1037
+ "epoch": 13.05,
1038
+ "learning_rate": 4.928e-06,
1039
+ "loss": 0.0021,
1040
+ "step": 4150
1041
+ },
1042
+ {
1043
+ "epoch": 13.13,
1044
+ "learning_rate": 4.906947368421052e-06,
1045
+ "loss": 0.0012,
1046
+ "step": 4175
1047
+ },
1048
+ {
1049
+ "epoch": 13.21,
1050
+ "learning_rate": 4.8858947368421045e-06,
1051
+ "loss": 0.0018,
1052
+ "step": 4200
1053
+ },
1054
+ {
1055
+ "epoch": 13.29,
1056
+ "learning_rate": 4.864842105263157e-06,
1057
+ "loss": 0.0012,
1058
+ "step": 4225
1059
+ },
1060
+ {
1061
+ "epoch": 13.36,
1062
+ "learning_rate": 4.84378947368421e-06,
1063
+ "loss": 0.0012,
1064
+ "step": 4250
1065
+ },
1066
+ {
1067
+ "epoch": 13.44,
1068
+ "learning_rate": 4.8227368421052625e-06,
1069
+ "loss": 0.0008,
1070
+ "step": 4275
1071
+ },
1072
+ {
1073
+ "epoch": 13.52,
1074
+ "learning_rate": 4.801684210526316e-06,
1075
+ "loss": 0.001,
1076
+ "step": 4300
1077
+ },
1078
+ {
1079
+ "epoch": 13.6,
1080
+ "learning_rate": 4.780631578947369e-06,
1081
+ "loss": 0.001,
1082
+ "step": 4325
1083
+ },
1084
+ {
1085
+ "epoch": 13.68,
1086
+ "learning_rate": 4.759578947368421e-06,
1087
+ "loss": 0.001,
1088
+ "step": 4350
1089
+ },
1090
+ {
1091
+ "epoch": 13.76,
1092
+ "learning_rate": 4.738526315789474e-06,
1093
+ "loss": 0.0011,
1094
+ "step": 4375
1095
+ },
1096
+ {
1097
+ "epoch": 13.84,
1098
+ "learning_rate": 4.717473684210527e-06,
1099
+ "loss": 0.0011,
1100
+ "step": 4400
1101
+ },
1102
+ {
1103
+ "epoch": 13.92,
1104
+ "learning_rate": 4.6964210526315784e-06,
1105
+ "loss": 0.0008,
1106
+ "step": 4425
1107
+ },
1108
+ {
1109
+ "epoch": 13.99,
1110
+ "learning_rate": 4.675368421052631e-06,
1111
+ "loss": 0.0021,
1112
+ "step": 4450
1113
+ },
1114
+ {
1115
+ "epoch": 14.07,
1116
+ "learning_rate": 4.654315789473684e-06,
1117
+ "loss": 0.0018,
1118
+ "step": 4475
1119
+ },
1120
+ {
1121
+ "epoch": 14.15,
1122
+ "learning_rate": 4.633263157894736e-06,
1123
+ "loss": 0.0013,
1124
+ "step": 4500
1125
+ },
1126
+ {
1127
+ "epoch": 14.23,
1128
+ "learning_rate": 4.612210526315789e-06,
1129
+ "loss": 0.0009,
1130
+ "step": 4525
1131
+ },
1132
+ {
1133
+ "epoch": 14.31,
1134
+ "learning_rate": 4.591157894736842e-06,
1135
+ "loss": 0.0012,
1136
+ "step": 4550
1137
+ },
1138
+ {
1139
+ "epoch": 14.39,
1140
+ "learning_rate": 4.570105263157894e-06,
1141
+ "loss": 0.0009,
1142
+ "step": 4575
1143
+ },
1144
+ {
1145
+ "epoch": 14.47,
1146
+ "learning_rate": 4.549052631578947e-06,
1147
+ "loss": 0.002,
1148
+ "step": 4600
1149
+ },
1150
+ {
1151
+ "epoch": 14.54,
1152
+ "learning_rate": 4.528e-06,
1153
+ "loss": 0.0011,
1154
+ "step": 4625
1155
+ },
1156
+ {
1157
+ "epoch": 14.62,
1158
+ "learning_rate": 4.506947368421053e-06,
1159
+ "loss": 0.0006,
1160
+ "step": 4650
1161
+ },
1162
+ {
1163
+ "epoch": 14.7,
1164
+ "learning_rate": 4.485894736842105e-06,
1165
+ "loss": 0.0013,
1166
+ "step": 4675
1167
+ },
1168
+ {
1169
+ "epoch": 14.78,
1170
+ "learning_rate": 4.464842105263158e-06,
1171
+ "loss": 0.0011,
1172
+ "step": 4700
1173
+ },
1174
+ {
1175
+ "epoch": 14.86,
1176
+ "learning_rate": 4.44378947368421e-06,
1177
+ "loss": 0.0013,
1178
+ "step": 4725
1179
+ },
1180
+ {
1181
+ "epoch": 14.94,
1182
+ "learning_rate": 4.422736842105263e-06,
1183
+ "loss": 0.0012,
1184
+ "step": 4750
1185
+ },
1186
+ {
1187
+ "epoch": 15.02,
1188
+ "learning_rate": 4.401684210526316e-06,
1189
+ "loss": 0.0024,
1190
+ "step": 4775
1191
+ },
1192
+ {
1193
+ "epoch": 15.09,
1194
+ "learning_rate": 4.380631578947368e-06,
1195
+ "loss": 0.0013,
1196
+ "step": 4800
1197
+ },
1198
+ {
1199
+ "epoch": 15.17,
1200
+ "learning_rate": 4.359578947368421e-06,
1201
+ "loss": 0.0013,
1202
+ "step": 4825
1203
+ },
1204
+ {
1205
+ "epoch": 15.25,
1206
+ "learning_rate": 4.3385263157894736e-06,
1207
+ "loss": 0.0014,
1208
+ "step": 4850
1209
+ },
1210
+ {
1211
+ "epoch": 15.33,
1212
+ "learning_rate": 4.317473684210526e-06,
1213
+ "loss": 0.0015,
1214
+ "step": 4875
1215
+ },
1216
+ {
1217
+ "epoch": 15.41,
1218
+ "learning_rate": 4.296421052631579e-06,
1219
+ "loss": 0.0006,
1220
+ "step": 4900
1221
+ },
1222
+ {
1223
+ "epoch": 15.49,
1224
+ "learning_rate": 4.275368421052631e-06,
1225
+ "loss": 0.0008,
1226
+ "step": 4925
1227
+ },
1228
+ {
1229
+ "epoch": 15.57,
1230
+ "learning_rate": 4.254315789473683e-06,
1231
+ "loss": 0.0008,
1232
+ "step": 4950
1233
+ },
1234
+ {
1235
+ "epoch": 15.64,
1236
+ "learning_rate": 4.233263157894737e-06,
1237
+ "loss": 0.0007,
1238
+ "step": 4975
1239
+ },
1240
+ {
1241
+ "epoch": 15.72,
1242
+ "learning_rate": 4.2122105263157895e-06,
1243
+ "loss": 0.0013,
1244
+ "step": 5000
1245
+ },
1246
+ {
1247
+ "epoch": 15.72,
1248
+ "eval_loss": 0.5114014744758606,
1249
+ "eval_runtime": 1251.3958,
1250
+ "eval_samples_per_second": 1.704,
1251
+ "eval_steps_per_second": 0.213,
1252
+ "eval_wer": 14.229352972119939,
1253
+ "step": 5000
1254
+ },
1255
+ {
1256
+ "epoch": 15.8,
1257
+ "learning_rate": 4.191157894736842e-06,
1258
+ "loss": 0.0011,
1259
+ "step": 5025
1260
+ },
1261
+ {
1262
+ "epoch": 15.88,
1263
+ "learning_rate": 4.170105263157895e-06,
1264
+ "loss": 0.0011,
1265
+ "step": 5050
1266
+ },
1267
+ {
1268
+ "epoch": 15.96,
1269
+ "learning_rate": 4.1490526315789475e-06,
1270
+ "loss": 0.0007,
1271
+ "step": 5075
1272
+ },
1273
+ {
1274
+ "epoch": 16.04,
1275
+ "learning_rate": 4.128e-06,
1276
+ "loss": 0.0011,
1277
+ "step": 5100
1278
+ },
1279
+ {
1280
+ "epoch": 16.12,
1281
+ "learning_rate": 4.106947368421053e-06,
1282
+ "loss": 0.0012,
1283
+ "step": 5125
1284
+ },
1285
+ {
1286
+ "epoch": 16.19,
1287
+ "learning_rate": 4.0858947368421054e-06,
1288
+ "loss": 0.0005,
1289
+ "step": 5150
1290
+ },
1291
+ {
1292
+ "epoch": 16.27,
1293
+ "learning_rate": 4.064842105263157e-06,
1294
+ "loss": 0.0016,
1295
+ "step": 5175
1296
+ },
1297
+ {
1298
+ "epoch": 16.35,
1299
+ "learning_rate": 4.04378947368421e-06,
1300
+ "loss": 0.0009,
1301
+ "step": 5200
1302
+ },
1303
+ {
1304
+ "epoch": 16.43,
1305
+ "learning_rate": 4.0227368421052625e-06,
1306
+ "loss": 0.0005,
1307
+ "step": 5225
1308
+ },
1309
+ {
1310
+ "epoch": 16.51,
1311
+ "learning_rate": 4.001684210526315e-06,
1312
+ "loss": 0.0006,
1313
+ "step": 5250
1314
+ },
1315
+ {
1316
+ "epoch": 16.59,
1317
+ "learning_rate": 3.980631578947369e-06,
1318
+ "loss": 0.0005,
1319
+ "step": 5275
1320
+ },
1321
+ {
1322
+ "epoch": 16.67,
1323
+ "learning_rate": 3.9595789473684205e-06,
1324
+ "loss": 0.0005,
1325
+ "step": 5300
1326
+ },
1327
+ {
1328
+ "epoch": 16.75,
1329
+ "learning_rate": 3.939368421052631e-06,
1330
+ "loss": 0.0015,
1331
+ "step": 5325
1332
+ },
1333
+ {
1334
+ "epoch": 16.82,
1335
+ "learning_rate": 3.918315789473684e-06,
1336
+ "loss": 0.0006,
1337
+ "step": 5350
1338
+ },
1339
+ {
1340
+ "epoch": 16.9,
1341
+ "learning_rate": 3.897263157894737e-06,
1342
+ "loss": 0.0005,
1343
+ "step": 5375
1344
+ },
1345
+ {
1346
+ "epoch": 16.98,
1347
+ "learning_rate": 3.87621052631579e-06,
1348
+ "loss": 0.0014,
1349
+ "step": 5400
1350
+ },
1351
+ {
1352
+ "epoch": 17.06,
1353
+ "learning_rate": 3.855157894736842e-06,
1354
+ "loss": 0.0012,
1355
+ "step": 5425
1356
+ },
1357
+ {
1358
+ "epoch": 17.14,
1359
+ "learning_rate": 3.834105263157894e-06,
1360
+ "loss": 0.0024,
1361
+ "step": 5450
1362
+ },
1363
+ {
1364
+ "epoch": 17.22,
1365
+ "learning_rate": 3.813052631578947e-06,
1366
+ "loss": 0.0007,
1367
+ "step": 5475
1368
+ },
1369
+ {
1370
+ "epoch": 17.3,
1371
+ "learning_rate": 3.7919999999999994e-06,
1372
+ "loss": 0.001,
1373
+ "step": 5500
1374
+ },
1375
+ {
1376
+ "epoch": 17.37,
1377
+ "learning_rate": 3.7709473684210525e-06,
1378
+ "loss": 0.0012,
1379
+ "step": 5525
1380
+ },
1381
+ {
1382
+ "epoch": 17.45,
1383
+ "learning_rate": 3.749894736842105e-06,
1384
+ "loss": 0.0008,
1385
+ "step": 5550
1386
+ },
1387
+ {
1388
+ "epoch": 17.53,
1389
+ "learning_rate": 3.728842105263158e-06,
1390
+ "loss": 0.0004,
1391
+ "step": 5575
1392
+ },
1393
+ {
1394
+ "epoch": 17.61,
1395
+ "learning_rate": 3.7077894736842105e-06,
1396
+ "loss": 0.0005,
1397
+ "step": 5600
1398
+ },
1399
+ {
1400
+ "epoch": 17.69,
1401
+ "learning_rate": 3.6867368421052627e-06,
1402
+ "loss": 0.0005,
1403
+ "step": 5625
1404
+ },
1405
+ {
1406
+ "epoch": 17.77,
1407
+ "learning_rate": 3.6656842105263154e-06,
1408
+ "loss": 0.0007,
1409
+ "step": 5650
1410
+ },
1411
+ {
1412
+ "epoch": 17.85,
1413
+ "learning_rate": 3.644631578947368e-06,
1414
+ "loss": 0.0011,
1415
+ "step": 5675
1416
+ },
1417
+ {
1418
+ "epoch": 17.92,
1419
+ "learning_rate": 3.623578947368421e-06,
1420
+ "loss": 0.0005,
1421
+ "step": 5700
1422
+ },
1423
+ {
1424
+ "epoch": 18.0,
1425
+ "learning_rate": 3.6025263157894738e-06,
1426
+ "loss": 0.0004,
1427
+ "step": 5725
1428
+ },
1429
+ {
1430
+ "epoch": 18.08,
1431
+ "learning_rate": 3.5814736842105264e-06,
1432
+ "loss": 0.0007,
1433
+ "step": 5750
1434
+ },
1435
+ {
1436
+ "epoch": 18.16,
1437
+ "learning_rate": 3.5604210526315786e-06,
1438
+ "loss": 0.0003,
1439
+ "step": 5775
1440
+ },
1441
+ {
1442
+ "epoch": 18.24,
1443
+ "learning_rate": 3.5393684210526313e-06,
1444
+ "loss": 0.0003,
1445
+ "step": 5800
1446
+ },
1447
+ {
1448
+ "epoch": 18.32,
1449
+ "learning_rate": 3.518315789473684e-06,
1450
+ "loss": 0.0003,
1451
+ "step": 5825
1452
+ },
1453
+ {
1454
+ "epoch": 18.4,
1455
+ "learning_rate": 3.4972631578947366e-06,
1456
+ "loss": 0.001,
1457
+ "step": 5850
1458
+ },
1459
+ {
1460
+ "epoch": 18.47,
1461
+ "learning_rate": 3.4762105263157897e-06,
1462
+ "loss": 0.0003,
1463
+ "step": 5875
1464
+ },
1465
+ {
1466
+ "epoch": 18.55,
1467
+ "learning_rate": 3.455157894736842e-06,
1468
+ "loss": 0.0004,
1469
+ "step": 5900
1470
+ },
1471
+ {
1472
+ "epoch": 18.63,
1473
+ "learning_rate": 3.4341052631578946e-06,
1474
+ "loss": 0.0003,
1475
+ "step": 5925
1476
+ },
1477
+ {
1478
+ "epoch": 18.71,
1479
+ "learning_rate": 3.4130526315789472e-06,
1480
+ "loss": 0.0004,
1481
+ "step": 5950
1482
+ },
1483
+ {
1484
+ "epoch": 18.79,
1485
+ "learning_rate": 3.392e-06,
1486
+ "loss": 0.0004,
1487
+ "step": 5975
1488
+ },
1489
+ {
1490
+ "epoch": 18.87,
1491
+ "learning_rate": 3.370947368421052e-06,
1492
+ "loss": 0.0014,
1493
+ "step": 6000
1494
+ },
1495
+ {
1496
+ "epoch": 18.87,
1497
+ "eval_loss": 0.5197107791900635,
1498
+ "eval_runtime": 1246.3616,
1499
+ "eval_samples_per_second": 1.711,
1500
+ "eval_steps_per_second": 0.214,
1501
+ "eval_wer": 13.913729615991583,
1502
+ "step": 6000
1503
+ },
1504
+ {
1505
+ "epoch": 18.95,
1506
+ "learning_rate": 3.349894736842105e-06,
1507
+ "loss": 0.0003,
1508
+ "step": 6025
1509
+ },
1510
+ {
1511
+ "epoch": 19.03,
1512
+ "learning_rate": 3.328842105263158e-06,
1513
+ "loss": 0.0003,
1514
+ "step": 6050
1515
+ },
1516
+ {
1517
+ "epoch": 19.1,
1518
+ "learning_rate": 3.3077894736842105e-06,
1519
+ "loss": 0.0004,
1520
+ "step": 6075
1521
+ },
1522
+ {
1523
+ "epoch": 19.18,
1524
+ "learning_rate": 3.286736842105263e-06,
1525
+ "loss": 0.0002,
1526
+ "step": 6100
1527
+ },
1528
+ {
1529
+ "epoch": 19.26,
1530
+ "learning_rate": 3.2656842105263154e-06,
1531
+ "loss": 0.0002,
1532
+ "step": 6125
1533
+ },
1534
+ {
1535
+ "epoch": 19.34,
1536
+ "learning_rate": 3.244631578947368e-06,
1537
+ "loss": 0.0004,
1538
+ "step": 6150
1539
+ },
1540
+ {
1541
+ "epoch": 19.42,
1542
+ "learning_rate": 3.2235789473684207e-06,
1543
+ "loss": 0.0005,
1544
+ "step": 6175
1545
+ },
1546
+ {
1547
+ "epoch": 19.5,
1548
+ "learning_rate": 3.2025263157894738e-06,
1549
+ "loss": 0.0003,
1550
+ "step": 6200
1551
+ },
1552
+ {
1553
+ "epoch": 19.58,
1554
+ "learning_rate": 3.1814736842105264e-06,
1555
+ "loss": 0.0007,
1556
+ "step": 6225
1557
+ },
1558
+ {
1559
+ "epoch": 19.65,
1560
+ "learning_rate": 3.160421052631579e-06,
1561
+ "loss": 0.0005,
1562
+ "step": 6250
1563
+ },
1564
+ {
1565
+ "epoch": 19.73,
1566
+ "learning_rate": 3.1393684210526313e-06,
1567
+ "loss": 0.0007,
1568
+ "step": 6275
1569
+ },
1570
+ {
1571
+ "epoch": 19.81,
1572
+ "learning_rate": 3.118315789473684e-06,
1573
+ "loss": 0.0002,
1574
+ "step": 6300
1575
+ },
1576
+ {
1577
+ "epoch": 19.89,
1578
+ "learning_rate": 3.0972631578947366e-06,
1579
+ "loss": 0.0008,
1580
+ "step": 6325
1581
+ },
1582
+ {
1583
+ "epoch": 19.97,
1584
+ "learning_rate": 3.0762105263157893e-06,
1585
+ "loss": 0.0013,
1586
+ "step": 6350
1587
+ },
1588
+ {
1589
+ "epoch": 20.05,
1590
+ "learning_rate": 3.0551578947368424e-06,
1591
+ "loss": 0.0004,
1592
+ "step": 6375
1593
+ },
1594
+ {
1595
+ "epoch": 20.13,
1596
+ "learning_rate": 3.0341052631578946e-06,
1597
+ "loss": 0.0002,
1598
+ "step": 6400
1599
+ },
1600
+ {
1601
+ "epoch": 20.2,
1602
+ "learning_rate": 3.0130526315789472e-06,
1603
+ "loss": 0.0004,
1604
+ "step": 6425
1605
+ },
1606
+ {
1607
+ "epoch": 20.28,
1608
+ "learning_rate": 2.992e-06,
1609
+ "loss": 0.0003,
1610
+ "step": 6450
1611
+ },
1612
+ {
1613
+ "epoch": 20.36,
1614
+ "learning_rate": 2.9709473684210526e-06,
1615
+ "loss": 0.0008,
1616
+ "step": 6475
1617
+ },
1618
+ {
1619
+ "epoch": 20.44,
1620
+ "learning_rate": 2.9498947368421048e-06,
1621
+ "loss": 0.0006,
1622
+ "step": 6500
1623
+ },
1624
+ {
1625
+ "epoch": 20.52,
1626
+ "learning_rate": 2.9288421052631574e-06,
1627
+ "loss": 0.0008,
1628
+ "step": 6525
1629
+ },
1630
+ {
1631
+ "epoch": 20.6,
1632
+ "learning_rate": 2.9077894736842105e-06,
1633
+ "loss": 0.0007,
1634
+ "step": 6550
1635
+ },
1636
+ {
1637
+ "epoch": 20.68,
1638
+ "learning_rate": 2.886736842105263e-06,
1639
+ "loss": 0.0007,
1640
+ "step": 6575
1641
+ },
1642
+ {
1643
+ "epoch": 20.75,
1644
+ "learning_rate": 2.865684210526316e-06,
1645
+ "loss": 0.0003,
1646
+ "step": 6600
1647
+ },
1648
+ {
1649
+ "epoch": 20.83,
1650
+ "learning_rate": 2.8446315789473685e-06,
1651
+ "loss": 0.0004,
1652
+ "step": 6625
1653
+ },
1654
+ {
1655
+ "epoch": 20.91,
1656
+ "learning_rate": 2.8235789473684207e-06,
1657
+ "loss": 0.0006,
1658
+ "step": 6650
1659
+ },
1660
+ {
1661
+ "epoch": 20.99,
1662
+ "learning_rate": 2.8025263157894734e-06,
1663
+ "loss": 0.0006,
1664
+ "step": 6675
1665
+ },
1666
+ {
1667
+ "epoch": 21.07,
1668
+ "learning_rate": 2.781473684210526e-06,
1669
+ "loss": 0.0008,
1670
+ "step": 6700
1671
+ },
1672
+ {
1673
+ "epoch": 21.15,
1674
+ "learning_rate": 2.760421052631579e-06,
1675
+ "loss": 0.0003,
1676
+ "step": 6725
1677
+ },
1678
+ {
1679
+ "epoch": 21.23,
1680
+ "learning_rate": 2.7393684210526318e-06,
1681
+ "loss": 0.0002,
1682
+ "step": 6750
1683
+ },
1684
+ {
1685
+ "epoch": 21.31,
1686
+ "learning_rate": 2.718315789473684e-06,
1687
+ "loss": 0.0002,
1688
+ "step": 6775
1689
+ },
1690
+ {
1691
+ "epoch": 21.38,
1692
+ "learning_rate": 2.6972631578947366e-06,
1693
+ "loss": 0.0003,
1694
+ "step": 6800
1695
+ },
1696
+ {
1697
+ "epoch": 21.46,
1698
+ "learning_rate": 2.6762105263157893e-06,
1699
+ "loss": 0.0005,
1700
+ "step": 6825
1701
+ },
1702
+ {
1703
+ "epoch": 21.54,
1704
+ "learning_rate": 2.655157894736842e-06,
1705
+ "loss": 0.0003,
1706
+ "step": 6850
1707
+ },
1708
+ {
1709
+ "epoch": 21.62,
1710
+ "learning_rate": 2.634105263157895e-06,
1711
+ "loss": 0.0004,
1712
+ "step": 6875
1713
+ },
1714
+ {
1715
+ "epoch": 21.7,
1716
+ "learning_rate": 2.6130526315789473e-06,
1717
+ "loss": 0.0003,
1718
+ "step": 6900
1719
+ },
1720
+ {
1721
+ "epoch": 21.78,
1722
+ "learning_rate": 2.592e-06,
1723
+ "loss": 0.0003,
1724
+ "step": 6925
1725
+ },
1726
+ {
1727
+ "epoch": 21.86,
1728
+ "learning_rate": 2.5709473684210526e-06,
1729
+ "loss": 0.0008,
1730
+ "step": 6950
1731
+ },
1732
+ {
1733
+ "epoch": 21.93,
1734
+ "learning_rate": 2.5498947368421052e-06,
1735
+ "loss": 0.0006,
1736
+ "step": 6975
1737
+ },
1738
+ {
1739
+ "epoch": 22.01,
1740
+ "learning_rate": 2.5288421052631575e-06,
1741
+ "loss": 0.0003,
1742
+ "step": 7000
1743
+ },
1744
+ {
1745
+ "epoch": 22.01,
1746
+ "eval_loss": 0.5421546101570129,
1747
+ "eval_runtime": 1231.4206,
1748
+ "eval_samples_per_second": 1.732,
1749
+ "eval_steps_per_second": 0.217,
1750
+ "eval_wer": 14.1977906365071,
1751
+ "step": 7000
1752
+ },
1753
+ {
1754
+ "epoch": 22.09,
1755
+ "learning_rate": 2.50778947368421e-06,
1756
+ "loss": 0.0005,
1757
+ "step": 7025
1758
+ },
1759
+ {
1760
+ "epoch": 22.17,
1761
+ "learning_rate": 2.486736842105263e-06,
1762
+ "loss": 0.0002,
1763
+ "step": 7050
1764
+ },
1765
+ {
1766
+ "epoch": 22.25,
1767
+ "learning_rate": 2.465684210526316e-06,
1768
+ "loss": 0.0003,
1769
+ "step": 7075
1770
+ },
1771
+ {
1772
+ "epoch": 22.33,
1773
+ "learning_rate": 2.4446315789473685e-06,
1774
+ "loss": 0.0002,
1775
+ "step": 7100
1776
+ },
1777
+ {
1778
+ "epoch": 22.41,
1779
+ "learning_rate": 2.423578947368421e-06,
1780
+ "loss": 0.0003,
1781
+ "step": 7125
1782
+ },
1783
+ {
1784
+ "epoch": 22.48,
1785
+ "learning_rate": 2.4025263157894734e-06,
1786
+ "loss": 0.0002,
1787
+ "step": 7150
1788
+ },
1789
+ {
1790
+ "epoch": 22.56,
1791
+ "learning_rate": 2.381473684210526e-06,
1792
+ "loss": 0.0004,
1793
+ "step": 7175
1794
+ },
1795
+ {
1796
+ "epoch": 22.64,
1797
+ "learning_rate": 2.3604210526315787e-06,
1798
+ "loss": 0.0002,
1799
+ "step": 7200
1800
+ },
1801
+ {
1802
+ "epoch": 22.72,
1803
+ "learning_rate": 2.3393684210526318e-06,
1804
+ "loss": 0.0004,
1805
+ "step": 7225
1806
+ },
1807
+ {
1808
+ "epoch": 22.8,
1809
+ "learning_rate": 2.3183157894736844e-06,
1810
+ "loss": 0.0002,
1811
+ "step": 7250
1812
+ },
1813
+ {
1814
+ "epoch": 22.88,
1815
+ "learning_rate": 2.2972631578947367e-06,
1816
+ "loss": 0.0002,
1817
+ "step": 7275
1818
+ },
1819
+ {
1820
+ "epoch": 22.96,
1821
+ "learning_rate": 2.2762105263157893e-06,
1822
+ "loss": 0.0002,
1823
+ "step": 7300
1824
+ },
1825
+ {
1826
+ "epoch": 23.03,
1827
+ "learning_rate": 2.255157894736842e-06,
1828
+ "loss": 0.0003,
1829
+ "step": 7325
1830
+ },
1831
+ {
1832
+ "epoch": 23.11,
1833
+ "learning_rate": 2.2341052631578946e-06,
1834
+ "loss": 0.0002,
1835
+ "step": 7350
1836
+ },
1837
+ {
1838
+ "epoch": 23.19,
1839
+ "learning_rate": 2.213052631578947e-06,
1840
+ "loss": 0.0002,
1841
+ "step": 7375
1842
+ },
1843
+ {
1844
+ "epoch": 23.27,
1845
+ "learning_rate": 2.192e-06,
1846
+ "loss": 0.0001,
1847
+ "step": 7400
1848
+ },
1849
+ {
1850
+ "epoch": 23.35,
1851
+ "learning_rate": 2.1709473684210526e-06,
1852
+ "loss": 0.0001,
1853
+ "step": 7425
1854
+ },
1855
+ {
1856
+ "epoch": 23.43,
1857
+ "learning_rate": 2.1498947368421052e-06,
1858
+ "loss": 0.0001,
1859
+ "step": 7450
1860
+ },
1861
+ {
1862
+ "epoch": 23.51,
1863
+ "learning_rate": 2.128842105263158e-06,
1864
+ "loss": 0.0001,
1865
+ "step": 7475
1866
+ },
1867
+ {
1868
+ "epoch": 23.58,
1869
+ "learning_rate": 2.10778947368421e-06,
1870
+ "loss": 0.0001,
1871
+ "step": 7500
1872
+ },
1873
+ {
1874
+ "epoch": 23.66,
1875
+ "learning_rate": 2.0867368421052628e-06,
1876
+ "loss": 0.0001,
1877
+ "step": 7525
1878
+ },
1879
+ {
1880
+ "epoch": 23.74,
1881
+ "learning_rate": 2.065684210526316e-06,
1882
+ "loss": 0.0001,
1883
+ "step": 7550
1884
+ },
1885
+ {
1886
+ "epoch": 23.82,
1887
+ "learning_rate": 2.0446315789473685e-06,
1888
+ "loss": 0.0001,
1889
+ "step": 7575
1890
+ },
1891
+ {
1892
+ "epoch": 23.9,
1893
+ "learning_rate": 2.023578947368421e-06,
1894
+ "loss": 0.0001,
1895
+ "step": 7600
1896
+ },
1897
+ {
1898
+ "epoch": 23.98,
1899
+ "learning_rate": 2.002526315789474e-06,
1900
+ "loss": 0.0001,
1901
+ "step": 7625
1902
+ },
1903
+ {
1904
+ "epoch": 24.06,
1905
+ "learning_rate": 1.981473684210526e-06,
1906
+ "loss": 0.0001,
1907
+ "step": 7650
1908
+ },
1909
+ {
1910
+ "epoch": 24.14,
1911
+ "learning_rate": 1.9604210526315787e-06,
1912
+ "loss": 0.0001,
1913
+ "step": 7675
1914
+ },
1915
+ {
1916
+ "epoch": 24.21,
1917
+ "learning_rate": 1.9393684210526314e-06,
1918
+ "loss": 0.0001,
1919
+ "step": 7700
1920
+ },
1921
+ {
1922
+ "epoch": 24.29,
1923
+ "learning_rate": 1.918315789473684e-06,
1924
+ "loss": 0.0001,
1925
+ "step": 7725
1926
+ },
1927
+ {
1928
+ "epoch": 24.37,
1929
+ "learning_rate": 1.8972631578947367e-06,
1930
+ "loss": 0.0001,
1931
+ "step": 7750
1932
+ },
1933
+ {
1934
+ "epoch": 24.45,
1935
+ "learning_rate": 1.8762105263157895e-06,
1936
+ "loss": 0.0001,
1937
+ "step": 7775
1938
+ },
1939
+ {
1940
+ "epoch": 24.53,
1941
+ "learning_rate": 1.855157894736842e-06,
1942
+ "loss": 0.0001,
1943
+ "step": 7800
1944
+ },
1945
+ {
1946
+ "epoch": 24.61,
1947
+ "learning_rate": 1.8341052631578946e-06,
1948
+ "loss": 0.0001,
1949
+ "step": 7825
1950
+ },
1951
+ {
1952
+ "epoch": 24.69,
1953
+ "learning_rate": 1.8130526315789473e-06,
1954
+ "loss": 0.0001,
1955
+ "step": 7850
1956
+ },
1957
+ {
1958
+ "epoch": 24.76,
1959
+ "learning_rate": 1.792e-06,
1960
+ "loss": 0.0001,
1961
+ "step": 7875
1962
+ },
1963
+ {
1964
+ "epoch": 24.84,
1965
+ "learning_rate": 1.7709473684210526e-06,
1966
+ "loss": 0.0001,
1967
+ "step": 7900
1968
+ },
1969
+ {
1970
+ "epoch": 24.92,
1971
+ "learning_rate": 1.749894736842105e-06,
1972
+ "loss": 0.0001,
1973
+ "step": 7925
1974
+ },
1975
+ {
1976
+ "epoch": 25.0,
1977
+ "learning_rate": 1.728842105263158e-06,
1978
+ "loss": 0.0001,
1979
+ "step": 7950
1980
+ },
1981
+ {
1982
+ "epoch": 25.08,
1983
+ "learning_rate": 1.7077894736842104e-06,
1984
+ "loss": 0.0001,
1985
+ "step": 7975
1986
+ },
1987
+ {
1988
+ "epoch": 25.16,
1989
+ "learning_rate": 1.686736842105263e-06,
1990
+ "loss": 0.0001,
1991
+ "step": 8000
1992
+ },
1993
+ {
1994
+ "epoch": 25.16,
1995
+ "eval_loss": 0.5658935904502869,
1996
+ "eval_runtime": 1242.8998,
1997
+ "eval_samples_per_second": 1.716,
1998
+ "eval_steps_per_second": 0.215,
1999
+ "eval_wer": 13.871646501841136,
2000
+ "step": 8000
2001
+ },
2002
+ {
2003
+ "epoch": 25.24,
2004
+ "learning_rate": 1.6656842105263159e-06,
2005
+ "loss": 0.0001,
2006
+ "step": 8025
2007
+ },
2008
+ {
2009
+ "epoch": 25.31,
2010
+ "learning_rate": 1.6446315789473683e-06,
2011
+ "loss": 0.0001,
2012
+ "step": 8050
2013
+ },
2014
+ {
2015
+ "epoch": 25.39,
2016
+ "learning_rate": 1.623578947368421e-06,
2017
+ "loss": 0.0001,
2018
+ "step": 8075
2019
+ },
2020
+ {
2021
+ "epoch": 25.47,
2022
+ "learning_rate": 1.6025263157894734e-06,
2023
+ "loss": 0.0001,
2024
+ "step": 8100
2025
+ },
2026
+ {
2027
+ "epoch": 25.55,
2028
+ "learning_rate": 1.5814736842105263e-06,
2029
+ "loss": 0.0001,
2030
+ "step": 8125
2031
+ },
2032
+ {
2033
+ "epoch": 25.63,
2034
+ "learning_rate": 1.560421052631579e-06,
2035
+ "loss": 0.0001,
2036
+ "step": 8150
2037
+ },
2038
+ {
2039
+ "epoch": 25.71,
2040
+ "learning_rate": 1.5393684210526314e-06,
2041
+ "loss": 0.0001,
2042
+ "step": 8175
2043
+ },
2044
+ {
2045
+ "epoch": 25.79,
2046
+ "learning_rate": 1.5183157894736843e-06,
2047
+ "loss": 0.0001,
2048
+ "step": 8200
2049
+ },
2050
+ {
2051
+ "epoch": 25.86,
2052
+ "learning_rate": 1.4972631578947367e-06,
2053
+ "loss": 0.0001,
2054
+ "step": 8225
2055
+ },
2056
+ {
2057
+ "epoch": 25.94,
2058
+ "learning_rate": 1.4762105263157894e-06,
2059
+ "loss": 0.0001,
2060
+ "step": 8250
2061
+ },
2062
+ {
2063
+ "epoch": 26.02,
2064
+ "learning_rate": 1.4551578947368422e-06,
2065
+ "loss": 0.0001,
2066
+ "step": 8275
2067
+ },
2068
+ {
2069
+ "epoch": 26.1,
2070
+ "learning_rate": 1.4341052631578947e-06,
2071
+ "loss": 0.0001,
2072
+ "step": 8300
2073
+ },
2074
+ {
2075
+ "epoch": 26.18,
2076
+ "learning_rate": 1.4130526315789473e-06,
2077
+ "loss": 0.0001,
2078
+ "step": 8325
2079
+ },
2080
+ {
2081
+ "epoch": 26.26,
2082
+ "learning_rate": 1.3919999999999998e-06,
2083
+ "loss": 0.0001,
2084
+ "step": 8350
2085
+ },
2086
+ {
2087
+ "epoch": 26.34,
2088
+ "learning_rate": 1.3709473684210526e-06,
2089
+ "loss": 0.0001,
2090
+ "step": 8375
2091
+ },
2092
+ {
2093
+ "epoch": 26.42,
2094
+ "learning_rate": 1.3498947368421053e-06,
2095
+ "loss": 0.0001,
2096
+ "step": 8400
2097
+ },
2098
+ {
2099
+ "epoch": 26.49,
2100
+ "learning_rate": 1.3288421052631577e-06,
2101
+ "loss": 0.0001,
2102
+ "step": 8425
2103
+ },
2104
+ {
2105
+ "epoch": 26.57,
2106
+ "learning_rate": 1.3077894736842106e-06,
2107
+ "loss": 0.0001,
2108
+ "step": 8450
2109
+ },
2110
+ {
2111
+ "epoch": 26.65,
2112
+ "learning_rate": 1.286736842105263e-06,
2113
+ "loss": 0.0001,
2114
+ "step": 8475
2115
+ },
2116
+ {
2117
+ "epoch": 26.73,
2118
+ "learning_rate": 1.2656842105263157e-06,
2119
+ "loss": 0.0001,
2120
+ "step": 8500
2121
+ },
2122
+ {
2123
+ "epoch": 26.81,
2124
+ "learning_rate": 1.2446315789473683e-06,
2125
+ "loss": 0.0001,
2126
+ "step": 8525
2127
+ },
2128
+ {
2129
+ "epoch": 26.89,
2130
+ "learning_rate": 1.223578947368421e-06,
2131
+ "loss": 0.0001,
2132
+ "step": 8550
2133
+ },
2134
+ {
2135
+ "epoch": 26.97,
2136
+ "learning_rate": 1.2025263157894737e-06,
2137
+ "loss": 0.0001,
2138
+ "step": 8575
2139
+ },
2140
+ {
2141
+ "epoch": 27.04,
2142
+ "learning_rate": 1.181473684210526e-06,
2143
+ "loss": 0.0001,
2144
+ "step": 8600
2145
+ },
2146
+ {
2147
+ "epoch": 27.12,
2148
+ "learning_rate": 1.160421052631579e-06,
2149
+ "loss": 0.0001,
2150
+ "step": 8625
2151
+ },
2152
+ {
2153
+ "epoch": 27.2,
2154
+ "learning_rate": 1.1393684210526316e-06,
2155
+ "loss": 0.0001,
2156
+ "step": 8650
2157
+ },
2158
+ {
2159
+ "epoch": 27.28,
2160
+ "learning_rate": 1.118315789473684e-06,
2161
+ "loss": 0.0001,
2162
+ "step": 8675
2163
+ },
2164
+ {
2165
+ "epoch": 27.36,
2166
+ "learning_rate": 1.097263157894737e-06,
2167
+ "loss": 0.0001,
2168
+ "step": 8700
2169
+ },
2170
+ {
2171
+ "epoch": 27.44,
2172
+ "learning_rate": 1.0762105263157894e-06,
2173
+ "loss": 0.0001,
2174
+ "step": 8725
2175
+ },
2176
+ {
2177
+ "epoch": 27.52,
2178
+ "learning_rate": 1.055157894736842e-06,
2179
+ "loss": 0.0001,
2180
+ "step": 8750
2181
+ },
2182
+ {
2183
+ "epoch": 27.59,
2184
+ "learning_rate": 1.0341052631578947e-06,
2185
+ "loss": 0.0001,
2186
+ "step": 8775
2187
+ },
2188
+ {
2189
+ "epoch": 27.67,
2190
+ "learning_rate": 1.0130526315789473e-06,
2191
+ "loss": 0.0001,
2192
+ "step": 8800
2193
+ },
2194
+ {
2195
+ "epoch": 27.75,
2196
+ "learning_rate": 9.92e-07,
2197
+ "loss": 0.0001,
2198
+ "step": 8825
2199
+ },
2200
+ {
2201
+ "epoch": 27.83,
2202
+ "learning_rate": 9.709473684210526e-07,
2203
+ "loss": 0.0001,
2204
+ "step": 8850
2205
+ },
2206
+ {
2207
+ "epoch": 27.91,
2208
+ "learning_rate": 9.498947368421052e-07,
2209
+ "loss": 0.0001,
2210
+ "step": 8875
2211
+ },
2212
+ {
2213
+ "epoch": 27.99,
2214
+ "learning_rate": 9.288421052631578e-07,
2215
+ "loss": 0.0001,
2216
+ "step": 8900
2217
+ },
2218
+ {
2219
+ "epoch": 28.07,
2220
+ "learning_rate": 9.077894736842104e-07,
2221
+ "loss": 0.0001,
2222
+ "step": 8925
2223
+ },
2224
+ {
2225
+ "epoch": 28.14,
2226
+ "learning_rate": 8.867368421052632e-07,
2227
+ "loss": 0.0001,
2228
+ "step": 8950
2229
+ },
2230
+ {
2231
+ "epoch": 28.22,
2232
+ "learning_rate": 8.656842105263158e-07,
2233
+ "loss": 0.0001,
2234
+ "step": 8975
2235
+ },
2236
+ {
2237
+ "epoch": 28.3,
2238
+ "learning_rate": 8.446315789473684e-07,
2239
+ "loss": 0.0001,
2240
+ "step": 9000
2241
+ },
2242
+ {
2243
+ "epoch": 28.3,
2244
+ "eval_loss": 0.5772180557250977,
2245
+ "eval_runtime": 1231.7677,
2246
+ "eval_samples_per_second": 1.732,
2247
+ "eval_steps_per_second": 0.217,
2248
+ "eval_wer": 13.729615991583376,
2249
+ "step": 9000
2250
+ },
2251
+ {
2252
+ "epoch": 28.38,
2253
+ "learning_rate": 8.23578947368421e-07,
2254
+ "loss": 0.0001,
2255
+ "step": 9025
2256
+ },
2257
+ {
2258
+ "epoch": 28.46,
2259
+ "learning_rate": 8.025263157894736e-07,
2260
+ "loss": 0.0001,
2261
+ "step": 9050
2262
+ },
2263
+ {
2264
+ "epoch": 28.54,
2265
+ "learning_rate": 7.814736842105263e-07,
2266
+ "loss": 0.0001,
2267
+ "step": 9075
2268
+ },
2269
+ {
2270
+ "epoch": 28.62,
2271
+ "learning_rate": 7.60421052631579e-07,
2272
+ "loss": 0.0001,
2273
+ "step": 9100
2274
+ },
2275
+ {
2276
+ "epoch": 28.69,
2277
+ "learning_rate": 7.393684210526315e-07,
2278
+ "loss": 0.0001,
2279
+ "step": 9125
2280
+ },
2281
+ {
2282
+ "epoch": 28.77,
2283
+ "learning_rate": 7.183157894736842e-07,
2284
+ "loss": 0.0001,
2285
+ "step": 9150
2286
+ },
2287
+ {
2288
+ "epoch": 28.85,
2289
+ "learning_rate": 6.972631578947367e-07,
2290
+ "loss": 0.0001,
2291
+ "step": 9175
2292
+ },
2293
+ {
2294
+ "epoch": 28.93,
2295
+ "learning_rate": 6.762105263157895e-07,
2296
+ "loss": 0.0001,
2297
+ "step": 9200
2298
+ },
2299
+ {
2300
+ "epoch": 29.01,
2301
+ "learning_rate": 6.55157894736842e-07,
2302
+ "loss": 0.0001,
2303
+ "step": 9225
2304
+ },
2305
+ {
2306
+ "epoch": 29.09,
2307
+ "learning_rate": 6.341052631578947e-07,
2308
+ "loss": 0.0001,
2309
+ "step": 9250
2310
+ },
2311
+ {
2312
+ "epoch": 29.17,
2313
+ "learning_rate": 6.130526315789474e-07,
2314
+ "loss": 0.0001,
2315
+ "step": 9275
2316
+ },
2317
+ {
2318
+ "epoch": 29.25,
2319
+ "learning_rate": 5.919999999999999e-07,
2320
+ "loss": 0.0001,
2321
+ "step": 9300
2322
+ },
2323
+ {
2324
+ "epoch": 29.32,
2325
+ "learning_rate": 5.709473684210527e-07,
2326
+ "loss": 0.0001,
2327
+ "step": 9325
2328
+ },
2329
+ {
2330
+ "epoch": 29.4,
2331
+ "learning_rate": 5.498947368421052e-07,
2332
+ "loss": 0.0001,
2333
+ "step": 9350
2334
+ },
2335
+ {
2336
+ "epoch": 29.48,
2337
+ "learning_rate": 5.288421052631579e-07,
2338
+ "loss": 0.0001,
2339
+ "step": 9375
2340
+ },
2341
+ {
2342
+ "epoch": 29.56,
2343
+ "learning_rate": 5.077894736842105e-07,
2344
+ "loss": 0.0001,
2345
+ "step": 9400
2346
+ },
2347
+ {
2348
+ "epoch": 29.64,
2349
+ "learning_rate": 4.867368421052631e-07,
2350
+ "loss": 0.0001,
2351
+ "step": 9425
2352
+ },
2353
+ {
2354
+ "epoch": 29.72,
2355
+ "learning_rate": 4.656842105263158e-07,
2356
+ "loss": 0.0001,
2357
+ "step": 9450
2358
+ },
2359
+ {
2360
+ "epoch": 29.8,
2361
+ "learning_rate": 4.4463157894736843e-07,
2362
+ "loss": 0.0001,
2363
+ "step": 9475
2364
+ },
2365
+ {
2366
+ "epoch": 29.87,
2367
+ "learning_rate": 4.2357894736842103e-07,
2368
+ "loss": 0.0001,
2369
+ "step": 9500
2370
+ },
2371
+ {
2372
+ "epoch": 29.95,
2373
+ "learning_rate": 4.0252631578947364e-07,
2374
+ "loss": 0.0001,
2375
+ "step": 9525
2376
+ },
2377
+ {
2378
+ "epoch": 30.03,
2379
+ "learning_rate": 3.814736842105263e-07,
2380
+ "loss": 0.0001,
2381
+ "step": 9550
2382
+ },
2383
+ {
2384
+ "epoch": 30.11,
2385
+ "learning_rate": 3.604210526315789e-07,
2386
+ "loss": 0.0001,
2387
+ "step": 9575
2388
+ },
2389
+ {
2390
+ "epoch": 30.19,
2391
+ "learning_rate": 3.393684210526316e-07,
2392
+ "loss": 0.0001,
2393
+ "step": 9600
2394
+ },
2395
+ {
2396
+ "epoch": 30.27,
2397
+ "learning_rate": 3.183157894736842e-07,
2398
+ "loss": 0.0001,
2399
+ "step": 9625
2400
+ },
2401
+ {
2402
+ "epoch": 30.35,
2403
+ "learning_rate": 2.972631578947368e-07,
2404
+ "loss": 0.0001,
2405
+ "step": 9650
2406
+ },
2407
+ {
2408
+ "epoch": 30.42,
2409
+ "learning_rate": 2.7621052631578946e-07,
2410
+ "loss": 0.0001,
2411
+ "step": 9675
2412
+ },
2413
+ {
2414
+ "epoch": 30.5,
2415
+ "learning_rate": 2.5515789473684206e-07,
2416
+ "loss": 0.0001,
2417
+ "step": 9700
2418
+ },
2419
+ {
2420
+ "epoch": 30.58,
2421
+ "learning_rate": 2.3410526315789472e-07,
2422
+ "loss": 0.0001,
2423
+ "step": 9725
2424
+ },
2425
+ {
2426
+ "epoch": 30.66,
2427
+ "learning_rate": 2.1305263157894734e-07,
2428
+ "loss": 0.0001,
2429
+ "step": 9750
2430
+ },
2431
+ {
2432
+ "epoch": 30.74,
2433
+ "learning_rate": 1.92e-07,
2434
+ "loss": 0.0001,
2435
+ "step": 9775
2436
+ },
2437
+ {
2438
+ "epoch": 30.82,
2439
+ "learning_rate": 1.7094736842105263e-07,
2440
+ "loss": 0.0001,
2441
+ "step": 9800
2442
+ },
2443
+ {
2444
+ "epoch": 30.9,
2445
+ "learning_rate": 1.4989473684210526e-07,
2446
+ "loss": 0.0001,
2447
+ "step": 9825
2448
+ },
2449
+ {
2450
+ "epoch": 30.97,
2451
+ "learning_rate": 1.2884210526315788e-07,
2452
+ "loss": 0.0001,
2453
+ "step": 9850
2454
+ },
2455
+ {
2456
+ "epoch": 31.05,
2457
+ "learning_rate": 1.0778947368421053e-07,
2458
+ "loss": 0.0001,
2459
+ "step": 9875
2460
+ },
2461
+ {
2462
+ "epoch": 31.13,
2463
+ "learning_rate": 8.673684210526315e-08,
2464
+ "loss": 0.0001,
2465
+ "step": 9900
2466
+ },
2467
+ {
2468
+ "epoch": 31.21,
2469
+ "learning_rate": 6.568421052631578e-08,
2470
+ "loss": 0.0001,
2471
+ "step": 9925
2472
+ },
2473
+ {
2474
+ "epoch": 31.29,
2475
+ "learning_rate": 4.463157894736842e-08,
2476
+ "loss": 0.0001,
2477
+ "step": 9950
2478
+ },
2479
+ {
2480
+ "epoch": 31.37,
2481
+ "learning_rate": 2.3578947368421052e-08,
2482
+ "loss": 0.0001,
2483
+ "step": 9975
2484
+ },
2485
+ {
2486
+ "epoch": 31.45,
2487
+ "learning_rate": 2.526315789473684e-09,
2488
+ "loss": 0.0001,
2489
+ "step": 10000
2490
+ },
2491
+ {
2492
+ "epoch": 31.45,
2493
+ "eval_loss": 0.5813759565353394,
2494
+ "eval_runtime": 1234.2331,
2495
+ "eval_samples_per_second": 1.728,
2496
+ "eval_steps_per_second": 0.216,
2497
+ "eval_wer": 13.708574434508153,
2498
+ "step": 10000
2499
+ },
2500
+ {
2501
+ "epoch": 31.45,
2502
+ "step": 10000,
2503
+ "total_flos": 3.263088216372019e+20,
2504
+ "train_loss": 0.0230206538159051,
2505
+ "train_runtime": 56008.0991,
2506
+ "train_samples_per_second": 5.713,
2507
+ "train_steps_per_second": 0.179
2508
+ }
2509
+ ],
2510
+ "max_steps": 10000,
2511
+ "num_train_epochs": 32,
2512
+ "total_flos": 3.263088216372019e+20,
2513
+ "trial_name": null,
2514
+ "trial_params": null
2515
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a31f574c900c75499bd9490b8ebb87b5d3e973f3c0daed72c40c0cadbb10d361
3
+ size 3579
vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
~/.cache/models--openai--whisper-medium/.no_exist/a0b3589e1034234495a1b696c28d4832cdaf8a32/generation_config.json ADDED
File without changes
~/.cache/models--openai--whisper-medium/.no_exist/a0b3589e1034234495a1b696c28d4832cdaf8a32/tokenizer.json ADDED
File without changes
~/.cache/models--openai--whisper-medium/blobs/0f3456460629e21d559c6daa23ab6ce3644e8271 ADDED
The diff for this file is too large to render. See raw diff
 
~/.cache/models--openai--whisper-medium/blobs/3a00c89ee5e8ae0cb159a6ec838843fb2266fac6 ADDED
The diff for this file is too large to render. See raw diff
 
~/.cache/models--openai--whisper-medium/blobs/47e9dd31523ecea227504afad3870da1cfe5ad81 ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "<|af|>": 50327,
3
+ "<|am|>": 50334,
4
+ "<|ar|>": 50272,
5
+ "<|as|>": 50350,
6
+ "<|az|>": 50304,
7
+ "<|ba|>": 50355,
8
+ "<|be|>": 50330,
9
+ "<|bg|>": 50292,
10
+ "<|bn|>": 50302,
11
+ "<|bo|>": 50347,
12
+ "<|br|>": 50309,
13
+ "<|bs|>": 50315,
14
+ "<|ca|>": 50270,
15
+ "<|cs|>": 50283,
16
+ "<|cy|>": 50297,
17
+ "<|da|>": 50285,
18
+ "<|de|>": 50261,
19
+ "<|el|>": 50281,
20
+ "<|endoftext|>": 50257,
21
+ "<|en|>": 50259,
22
+ "<|es|>": 50262,
23
+ "<|et|>": 50307,
24
+ "<|eu|>": 50310,
25
+ "<|fa|>": 50300,
26
+ "<|fi|>": 50277,
27
+ "<|fo|>": 50338,
28
+ "<|fr|>": 50265,
29
+ "<|gl|>": 50319,
30
+ "<|gu|>": 50333,
31
+ "<|haw|>": 50352,
32
+ "<|ha|>": 50354,
33
+ "<|hi|>": 50276,
34
+ "<|hr|>": 50291,
35
+ "<|ht|>": 50339,
36
+ "<|hu|>": 50286,
37
+ "<|hy|>": 50312,
38
+ "<|id|>": 50275,
39
+ "<|is|>": 50311,
40
+ "<|it|>": 50274,
41
+ "<|iw|>": 50279,
42
+ "<|ja|>": 50266,
43
+ "<|jw|>": 50356,
44
+ "<|ka|>": 50329,
45
+ "<|kk|>": 50316,
46
+ "<|km|>": 50323,
47
+ "<|kn|>": 50306,
48
+ "<|ko|>": 50264,
49
+ "<|la|>": 50294,
50
+ "<|lb|>": 50345,
51
+ "<|ln|>": 50353,
52
+ "<|lo|>": 50336,
53
+ "<|lt|>": 50293,
54
+ "<|lv|>": 50301,
55
+ "<|mg|>": 50349,
56
+ "<|mi|>": 50295,
57
+ "<|mk|>": 50308,
58
+ "<|ml|>": 50296,
59
+ "<|mn|>": 50314,
60
+ "<|mr|>": 50320,
61
+ "<|ms|>": 50282,
62
+ "<|mt|>": 50343,
63
+ "<|my|>": 50346,
64
+ "<|ne|>": 50313,
65
+ "<|nl|>": 50271,
66
+ "<|nn|>": 50342,
67
+ "<|nocaptions|>": 50362,
68
+ "<|notimestamps|>": 50363,
69
+ "<|no|>": 50288,
70
+ "<|oc|>": 50328,
71
+ "<|pa|>": 50321,
72
+ "<|pl|>": 50269,
73
+ "<|ps|>": 50340,
74
+ "<|pt|>": 50267,
75
+ "<|ro|>": 50284,
76
+ "<|ru|>": 50263,
77
+ "<|sa|>": 50344,
78
+ "<|sd|>": 50332,
79
+ "<|si|>": 50322,
80
+ "<|sk|>": 50298,
81
+ "<|sl|>": 50305,
82
+ "<|sn|>": 50324,
83
+ "<|so|>": 50326,
84
+ "<|sq|>": 50317,
85
+ "<|sr|>": 50303,
86
+ "<|startoflm|>": 50360,
87
+ "<|startofprev|>": 50361,
88
+ "<|startoftranscript|>": 50258,
89
+ "<|su|>": 50357,
90
+ "<|sv|>": 50273,
91
+ "<|sw|>": 50318,
92
+ "<|ta|>": 50287,
93
+ "<|te|>": 50299,
94
+ "<|tg|>": 50331,
95
+ "<|th|>": 50289,
96
+ "<|tk|>": 50341,
97
+ "<|tl|>": 50348,
98
+ "<|transcribe|>": 50359,
99
+ "<|translate|>": 50358,
100
+ "<|tr|>": 50268,
101
+ "<|tt|>": 50351,
102
+ "<|uk|>": 50280,
103
+ "<|ur|>": 50290,
104
+ "<|uz|>": 50337,
105
+ "<|vi|>": 50278,
106
+ "<|yi|>": 50335,
107
+ "<|yo|>": 50325,
108
+ "<|zh|>": 50260
109
+ }
~/.cache/models--openai--whisper-medium/blobs/5e6c8377adf6019428b34a1ad906fb43de71d387 ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "eos_token": {
13
+ "__type": "AddedToken",
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "errors": "replace",
21
+ "model_max_length": 1024,
22
+ "name_or_path": "openai/whisper-medium",
23
+ "pad_token": null,
24
+ "processor_class": "WhisperProcessor",
25
+ "return_attention_mask": false,
26
+ "special_tokens_map_file": null,
27
+ "tokenizer_class": "WhisperTokenizer",
28
+ "unk_token": {
29
+ "__type": "AddedToken",
30
+ "content": "",
31
+ "lstrip": false,
32
+ "normalized": true,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ }
36
+ }
~/.cache/models--openai--whisper-medium/blobs/9115b6806f75d5122486b0e1ae0279a0207199c2 ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|endoftext|>",
4
+ "<|startoftranscript|>",
5
+ "<|en|>",
6
+ "<|zh|>",
7
+ "<|de|>",
8
+ "<|es|>",
9
+ "<|ru|>",
10
+ "<|ko|>",
11
+ "<|fr|>",
12
+ "<|ja|>",
13
+ "<|pt|>",
14
+ "<|tr|>",
15
+ "<|pl|>",
16
+ "<|ca|>",
17
+ "<|nl|>",
18
+ "<|ar|>",
19
+ "<|sv|>",
20
+ "<|it|>",
21
+ "<|id|>",
22
+ "<|hi|>",
23
+ "<|fi|>",
24
+ "<|vi|>",
25
+ "<|iw|>",
26
+ "<|uk|>",
27
+ "<|el|>",
28
+ "<|ms|>",
29
+ "<|cs|>",
30
+ "<|ro|>",
31
+ "<|da|>",
32
+ "<|hu|>",
33
+ "<|ta|>",
34
+ "<|no|>",
35
+ "<|th|>",
36
+ "<|ur|>",
37
+ "<|hr|>",
38
+ "<|bg|>",
39
+ "<|lt|>",
40
+ "<|la|>",
41
+ "<|mi|>",
42
+ "<|ml|>",
43
+ "<|cy|>",
44
+ "<|sk|>",
45
+ "<|te|>",
46
+ "<|fa|>",
47
+ "<|lv|>",
48
+ "<|bn|>",
49
+ "<|sr|>",
50
+ "<|az|>",
51
+ "<|sl|>",
52
+ "<|kn|>",
53
+ "<|et|>",
54
+ "<|mk|>",
55
+ "<|br|>",
56
+ "<|eu|>",
57
+ "<|is|>",
58
+ "<|hy|>",
59
+ "<|ne|>",
60
+ "<|mn|>",
61
+ "<|bs|>",
62
+ "<|kk|>",
63
+ "<|sq|>",
64
+ "<|sw|>",
65
+ "<|gl|>",
66
+ "<|mr|>",
67
+ "<|pa|>",
68
+ "<|si|>",
69
+ "<|km|>",
70
+ "<|sn|>",
71
+ "<|yo|>",
72
+ "<|so|>",
73
+ "<|af|>",
74
+ "<|oc|>",
75
+ "<|ka|>",
76
+ "<|be|>",
77
+ "<|tg|>",
78
+ "<|sd|>",
79
+ "<|gu|>",
80
+ "<|am|>",
81
+ "<|yi|>",
82
+ "<|lo|>",
83
+ "<|uz|>",
84
+ "<|fo|>",
85
+ "<|ht|>",
86
+ "<|ps|>",
87
+ "<|tk|>",
88
+ "<|nn|>",
89
+ "<|mt|>",
90
+ "<|sa|>",
91
+ "<|lb|>",
92
+ "<|my|>",
93
+ "<|bo|>",
94
+ "<|tl|>",
95
+ "<|mg|>",
96
+ "<|as|>",
97
+ "<|tt|>",
98
+ "<|haw|>",
99
+ "<|ln|>",
100
+ "<|ha|>",
101
+ "<|ba|>",
102
+ "<|jw|>",
103
+ "<|su|>",
104
+ "<|translate|>",
105
+ "<|transcribe|>",
106
+ "<|startoflm|>",
107
+ "<|startofprev|>",
108
+ "<|nocaptions|>",
109
+ "<|notimestamps|>"
110
+ ],
111
+ "bos_token": {
112
+ "content": "<|endoftext|>",
113
+ "lstrip": false,
114
+ "normalized": true,
115
+ "rstrip": false,
116
+ "single_word": false
117
+ },
118
+ "eos_token": {
119
+ "content": "<|endoftext|>",
120
+ "lstrip": false,
121
+ "normalized": true,
122
+ "rstrip": false,
123
+ "single_word": false
124
+ },
125
+ "pad_token": "<|endoftext|>",
126
+ "unk_token": {
127
+ "content": "",
128
+ "lstrip": false,
129
+ "normalized": true,
130
+ "rstrip": false,
131
+ "single_word": false
132
+ }
133
+ }
~/.cache/models--openai--whisper-medium/blobs/96d734d68ad5d63c8f41d525f5769788432f6963f32dbe36feefaa33d736a962 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96d734d68ad5d63c8f41d525f5769788432f6963f32dbe36feefaa33d736a962
3
+ size 3055735323
~/.cache/models--openai--whisper-medium/blobs/c2048dfa9fd94a052e62e908d2c4dfb18534b4d2 ADDED
The diff for this file is too large to render. See raw diff
 
~/.cache/models--openai--whisper-medium/blobs/dd6ae819ad738ac1a546e9f9282ef325c33b9ea0 ADDED
@@ -0,0 +1,1742 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "accessorise": "accessorize",
3
+ "accessorised": "accessorized",
4
+ "accessorises": "accessorizes",
5
+ "accessorising": "accessorizing",
6
+ "acclimatisation": "acclimatization",
7
+ "acclimatise": "acclimatize",
8
+ "acclimatised": "acclimatized",
9
+ "acclimatises": "acclimatizes",
10
+ "acclimatising": "acclimatizing",
11
+ "accoutrements": "accouterments",
12
+ "aeon": "eon",
13
+ "aeons": "eons",
14
+ "aerogramme": "aerogram",
15
+ "aerogrammes": "aerograms",
16
+ "aeroplane": "airplane",
17
+ "aeroplanes": "airplanes",
18
+ "aesthete": "esthete",
19
+ "aesthetes": "esthetes",
20
+ "aesthetic": "esthetic",
21
+ "aesthetically": "esthetically",
22
+ "aesthetics": "esthetics",
23
+ "aetiology": "etiology",
24
+ "ageing": "aging",
25
+ "aggrandisement": "aggrandizement",
26
+ "agonise": "agonize",
27
+ "agonised": "agonized",
28
+ "agonises": "agonizes",
29
+ "agonising": "agonizing",
30
+ "agonisingly": "agonizingly",
31
+ "almanack": "almanac",
32
+ "almanacks": "almanacs",
33
+ "aluminium": "aluminum",
34
+ "amortisable": "amortizable",
35
+ "amortisation": "amortization",
36
+ "amortisations": "amortizations",
37
+ "amortise": "amortize",
38
+ "amortised": "amortized",
39
+ "amortises": "amortizes",
40
+ "amortising": "amortizing",
41
+ "amphitheatre": "amphitheater",
42
+ "amphitheatres": "amphitheaters",
43
+ "anaemia": "anemia",
44
+ "anaemic": "anemic",
45
+ "anaesthesia": "anesthesia",
46
+ "anaesthetic": "anesthetic",
47
+ "anaesthetics": "anesthetics",
48
+ "anaesthetise": "anesthetize",
49
+ "anaesthetised": "anesthetized",
50
+ "anaesthetises": "anesthetizes",
51
+ "anaesthetising": "anesthetizing",
52
+ "anaesthetist": "anesthetist",
53
+ "anaesthetists": "anesthetists",
54
+ "anaesthetize": "anesthetize",
55
+ "anaesthetized": "anesthetized",
56
+ "anaesthetizes": "anesthetizes",
57
+ "anaesthetizing": "anesthetizing",
58
+ "analogue": "analog",
59
+ "analogues": "analogs",
60
+ "analyse": "analyze",
61
+ "analysed": "analyzed",
62
+ "analyses": "analyzes",
63
+ "analysing": "analyzing",
64
+ "anglicise": "anglicize",
65
+ "anglicised": "anglicized",
66
+ "anglicises": "anglicizes",
67
+ "anglicising": "anglicizing",
68
+ "annualised": "annualized",
69
+ "antagonise": "antagonize",
70
+ "antagonised": "antagonized",
71
+ "antagonises": "antagonizes",
72
+ "antagonising": "antagonizing",
73
+ "apologise": "apologize",
74
+ "apologised": "apologized",
75
+ "apologises": "apologizes",
76
+ "apologising": "apologizing",
77
+ "appal": "appall",
78
+ "appals": "appalls",
79
+ "appetiser": "appetizer",
80
+ "appetisers": "appetizers",
81
+ "appetising": "appetizing",
82
+ "appetisingly": "appetizingly",
83
+ "arbour": "arbor",
84
+ "arbours": "arbors",
85
+ "archaeologically": "archeologically",
86
+ "archaeologist": "archeologist",
87
+ "archaeologists": "archeologists",
88
+ "archaeology": "archeology",
89
+ "archeological": "archaeological",
90
+ "ardour": "ardor",
91
+ "armour": "armor",
92
+ "armoured": "armored",
93
+ "armourer": "armorer",
94
+ "armourers": "armorers",
95
+ "armouries": "armories",
96
+ "armoury": "armory",
97
+ "artefact": "artifact",
98
+ "artefacts": "artifacts",
99
+ "authorise": "authorize",
100
+ "authorised": "authorized",
101
+ "authorises": "authorizes",
102
+ "authorising": "authorizing",
103
+ "axe": "ax",
104
+ "backpedalled": "backpedaled",
105
+ "backpedalling": "backpedaling",
106
+ "bannister": "banister",
107
+ "bannisters": "banisters",
108
+ "baptise": "baptize",
109
+ "baptised": "baptized",
110
+ "baptises": "baptizes",
111
+ "baptising": "baptizing",
112
+ "bastardise": "bastardize",
113
+ "bastardised": "bastardized",
114
+ "bastardises": "bastardizes",
115
+ "bastardising": "bastardizing",
116
+ "battleax": "battleaxe",
117
+ "baulk": "balk",
118
+ "baulked": "balked",
119
+ "baulking": "balking",
120
+ "baulks": "balks",
121
+ "bedevilled": "bedeviled",
122
+ "bedevilling": "bedeviling",
123
+ "behaviour": "behavior",
124
+ "behavioural": "behavioral",
125
+ "behaviourism": "behaviorism",
126
+ "behaviourist": "behaviorist",
127
+ "behaviourists": "behaviorists",
128
+ "behaviours": "behaviors",
129
+ "behove": "behoove",
130
+ "behoved": "behooved",
131
+ "behoves": "behooves",
132
+ "bejewelled": "bejeweled",
133
+ "belabour": "belabor",
134
+ "belaboured": "belabored",
135
+ "belabouring": "belaboring",
136
+ "belabours": "belabors",
137
+ "bevelled": "beveled",
138
+ "bevvies": "bevies",
139
+ "bevvy": "bevy",
140
+ "biassed": "biased",
141
+ "biassing": "biasing",
142
+ "bingeing": "binging",
143
+ "bougainvillaea": "bougainvillea",
144
+ "bougainvillaeas": "bougainvilleas",
145
+ "bowdlerise": "bowdlerize",
146
+ "bowdlerised": "bowdlerized",
147
+ "bowdlerises": "bowdlerizes",
148
+ "bowdlerising": "bowdlerizing",
149
+ "breathalyse": "breathalyze",
150
+ "breathalysed": "breathalyzed",
151
+ "breathalyser": "breathalyzer",
152
+ "breathalysers": "breathalyzers",
153
+ "breathalyses": "breathalyzes",
154
+ "breathalysing": "breathalyzing",
155
+ "brutalise": "brutalize",
156
+ "brutalised": "brutalized",
157
+ "brutalises": "brutalizes",
158
+ "brutalising": "brutalizing",
159
+ "busses": "buses",
160
+ "bussing": "busing",
161
+ "caesarean": "cesarean",
162
+ "caesareans": "cesareans",
163
+ "calibre": "caliber",
164
+ "calibres": "calibers",
165
+ "calliper": "caliper",
166
+ "callipers": "calipers",
167
+ "callisthenics": "calisthenics",
168
+ "canalise": "canalize",
169
+ "canalised": "canalized",
170
+ "canalises": "canalizes",
171
+ "canalising": "canalizing",
172
+ "cancelation": "cancellation",
173
+ "cancelations": "cancellations",
174
+ "cancelled": "canceled",
175
+ "cancelling": "canceling",
176
+ "candour": "candor",
177
+ "cannibalise": "cannibalize",
178
+ "cannibalised": "cannibalized",
179
+ "cannibalises": "cannibalizes",
180
+ "cannibalising": "cannibalizing",
181
+ "canonise": "canonize",
182
+ "canonised": "canonized",
183
+ "canonises": "canonizes",
184
+ "canonising": "canonizing",
185
+ "capitalise": "capitalize",
186
+ "capitalised": "capitalized",
187
+ "capitalises": "capitalizes",
188
+ "capitalising": "capitalizing",
189
+ "caramelise": "caramelize",
190
+ "caramelised": "caramelized",
191
+ "caramelises": "caramelizes",
192
+ "caramelising": "caramelizing",
193
+ "carbonise": "carbonize",
194
+ "carbonised": "carbonized",
195
+ "carbonises": "carbonizes",
196
+ "carbonising": "carbonizing",
197
+ "carolled": "caroled",
198
+ "carolling": "caroling",
199
+ "catalogue": "catalog",
200
+ "catalogued": "cataloged",
201
+ "catalogues": "catalogs",
202
+ "cataloguing": "cataloging",
203
+ "catalyse": "catalyze",
204
+ "catalysed": "catalyzed",
205
+ "catalyses": "catalyzes",
206
+ "catalysing": "catalyzing",
207
+ "categorise": "categorize",
208
+ "categorised": "categorized",
209
+ "categorises": "categorizes",
210
+ "categorising": "categorizing",
211
+ "cauterise": "cauterize",
212
+ "cauterised": "cauterized",
213
+ "cauterises": "cauterizes",
214
+ "cauterising": "cauterizing",
215
+ "cavilled": "caviled",
216
+ "cavilling": "caviling",
217
+ "centigramme": "centigram",
218
+ "centigrammes": "centigrams",
219
+ "centilitre": "centiliter",
220
+ "centilitres": "centiliters",
221
+ "centimetre": "centimeter",
222
+ "centimetres": "centimeters",
223
+ "centralise": "centralize",
224
+ "centralised": "centralized",
225
+ "centralises": "centralizes",
226
+ "centralising": "centralizing",
227
+ "centre": "center",
228
+ "centred": "centered",
229
+ "centrefold": "centerfold",
230
+ "centrefolds": "centerfolds",
231
+ "centrepiece": "centerpiece",
232
+ "centrepieces": "centerpieces",
233
+ "centres": "centers",
234
+ "channelled": "channeled",
235
+ "channelling": "channeling",
236
+ "characterise": "characterize",
237
+ "characterised": "characterized",
238
+ "characterises": "characterizes",
239
+ "characterising": "characterizing",
240
+ "cheque": "check",
241
+ "chequebook": "checkbook",
242
+ "chequebooks": "checkbooks",
243
+ "chequered": "checkered",
244
+ "cheques": "checks",
245
+ "chilli": "chili",
246
+ "chimaera": "chimera",
247
+ "chimaeras": "chimeras",
248
+ "chiselled": "chiseled",
249
+ "chiselling": "chiseling",
250
+ "circularise": "circularize",
251
+ "circularised": "circularized",
252
+ "circularises": "circularizes",
253
+ "circularising": "circularizing",
254
+ "civilise": "civilize",
255
+ "civilised": "civilized",
256
+ "civilises": "civilizes",
257
+ "civilising": "civilizing",
258
+ "clamour": "clamor",
259
+ "clamoured": "clamored",
260
+ "clamouring": "clamoring",
261
+ "clamours": "clamors",
262
+ "clangour": "clangor",
263
+ "clarinettist": "clarinetist",
264
+ "clarinettists": "clarinetists",
265
+ "collectivise": "collectivize",
266
+ "collectivised": "collectivized",
267
+ "collectivises": "collectivizes",
268
+ "collectivising": "collectivizing",
269
+ "colonisation": "colonization",
270
+ "colonise": "colonize",
271
+ "colonised": "colonized",
272
+ "coloniser": "colonizer",
273
+ "colonisers": "colonizers",
274
+ "colonises": "colonizes",
275
+ "colonising": "colonizing",
276
+ "colour": "color",
277
+ "colourant": "colorant",
278
+ "colourants": "colorants",
279
+ "coloured": "colored",
280
+ "coloureds": "coloreds",
281
+ "colourful": "colorful",
282
+ "colourfully": "colorfully",
283
+ "colouring": "coloring",
284
+ "colourize": "colorize",
285
+ "colourized": "colorized",
286
+ "colourizes": "colorizes",
287
+ "colourizing": "colorizing",
288
+ "colourless": "colorless",
289
+ "colours": "colors",
290
+ "commercialise": "commercialize",
291
+ "commercialised": "commercialized",
292
+ "commercialises": "commercializes",
293
+ "commercialising": "commercializing",
294
+ "compartmentalise": "compartmentalize",
295
+ "compartmentalised": "compartmentalized",
296
+ "compartmentalises": "compartmentalizes",
297
+ "compartmentalising": "compartmentalizing",
298
+ "computerise": "computerize",
299
+ "computerised": "computerized",
300
+ "computerises": "computerizes",
301
+ "computerising": "computerizing",
302
+ "conceptualise": "conceptualize",
303
+ "conceptualised": "conceptualized",
304
+ "conceptualises": "conceptualizes",
305
+ "conceptualising": "conceptualizing",
306
+ "connexion": "connection",
307
+ "connexions": "connections",
308
+ "contextualise": "contextualize",
309
+ "contextualised": "contextualized",
310
+ "contextualises": "contextualizes",
311
+ "contextualising": "contextualizing",
312
+ "cosier": "cozier",
313
+ "cosies": "cozies",
314
+ "cosiest": "coziest",
315
+ "cosily": "cozily",
316
+ "cosiness": "coziness",
317
+ "cosy": "cozy",
318
+ "councillor": "councilor",
319
+ "councillors": "councilors",
320
+ "counselled": "counseled",
321
+ "counselling": "counseling",
322
+ "counsellor": "counselor",
323
+ "counsellors": "counselors",
324
+ "crenelated": "crenellated",
325
+ "criminalise": "criminalize",
326
+ "criminalised": "criminalized",
327
+ "criminalises": "criminalizes",
328
+ "criminalising": "criminalizing",
329
+ "criticise": "criticize",
330
+ "criticised": "criticized",
331
+ "criticises": "criticizes",
332
+ "criticising": "criticizing",
333
+ "crueller": "crueler",
334
+ "cruellest": "cruelest",
335
+ "crystallisation": "crystallization",
336
+ "crystallise": "crystallize",
337
+ "crystallised": "crystallized",
338
+ "crystallises": "crystallizes",
339
+ "crystallising": "crystallizing",
340
+ "cudgelled": "cudgeled",
341
+ "cudgelling": "cudgeling",
342
+ "customise": "customize",
343
+ "customised": "customized",
344
+ "customises": "customizes",
345
+ "customising": "customizing",
346
+ "cypher": "cipher",
347
+ "cyphers": "ciphers",
348
+ "decentralisation": "decentralization",
349
+ "decentralise": "decentralize",
350
+ "decentralised": "decentralized",
351
+ "decentralises": "decentralizes",
352
+ "decentralising": "decentralizing",
353
+ "decriminalisation": "decriminalization",
354
+ "decriminalise": "decriminalize",
355
+ "decriminalised": "decriminalized",
356
+ "decriminalises": "decriminalizes",
357
+ "decriminalising": "decriminalizing",
358
+ "defence": "defense",
359
+ "defenceless": "defenseless",
360
+ "defences": "defenses",
361
+ "dehumanisation": "dehumanization",
362
+ "dehumanise": "dehumanize",
363
+ "dehumanised": "dehumanized",
364
+ "dehumanises": "dehumanizes",
365
+ "dehumanising": "dehumanizing",
366
+ "demeanour": "demeanor",
367
+ "demilitarisation": "demilitarization",
368
+ "demilitarise": "demilitarize",
369
+ "demilitarised": "demilitarized",
370
+ "demilitarises": "demilitarizes",
371
+ "demilitarising": "demilitarizing",
372
+ "demobilisation": "demobilization",
373
+ "demobilise": "demobilize",
374
+ "demobilised": "demobilized",
375
+ "demobilises": "demobilizes",
376
+ "demobilising": "demobilizing",
377
+ "democratisation": "democratization",
378
+ "democratise": "democratize",
379
+ "democratised": "democratized",
380
+ "democratises": "democratizes",
381
+ "democratising": "democratizing",
382
+ "demonise": "demonize",
383
+ "demonised": "demonized",
384
+ "demonises": "demonizes",
385
+ "demonising": "demonizing",
386
+ "demoralisation": "demoralization",
387
+ "demoralise": "demoralize",
388
+ "demoralised": "demoralized",
389
+ "demoralises": "demoralizes",
390
+ "demoralising": "demoralizing",
391
+ "denationalisation": "denationalization",
392
+ "denationalise": "denationalize",
393
+ "denationalised": "denationalized",
394
+ "denationalises": "denationalizes",
395
+ "denationalising": "denationalizing",
396
+ "deodorise": "deodorize",
397
+ "deodorised": "deodorized",
398
+ "deodorises": "deodorizes",
399
+ "deodorising": "deodorizing",
400
+ "depersonalise": "depersonalize",
401
+ "depersonalised": "depersonalized",
402
+ "depersonalises": "depersonalizes",
403
+ "depersonalising": "depersonalizing",
404
+ "deputise": "deputize",
405
+ "deputised": "deputized",
406
+ "deputises": "deputizes",
407
+ "deputising": "deputizing",
408
+ "desensitisation": "desensitization",
409
+ "desensitise": "desensitize",
410
+ "desensitised": "desensitized",
411
+ "desensitises": "desensitizes",
412
+ "desensitising": "desensitizing",
413
+ "destabilisation": "destabilization",
414
+ "destabilise": "destabilize",
415
+ "destabilised": "destabilized",
416
+ "destabilises": "destabilizes",
417
+ "destabilising": "destabilizing",
418
+ "dialled": "dialed",
419
+ "dialling": "dialing",
420
+ "dialogue": "dialog",
421
+ "dialogues": "dialogs",
422
+ "diarrhoea": "diarrhea",
423
+ "digitise": "digitize",
424
+ "digitised": "digitized",
425
+ "digitises": "digitizes",
426
+ "digitising": "digitizing",
427
+ "disc": "disk",
428
+ "discolour": "discolor",
429
+ "discoloured": "discolored",
430
+ "discolouring": "discoloring",
431
+ "discolours": "discolors",
432
+ "discs": "disks",
433
+ "disembowelled": "disemboweled",
434
+ "disembowelling": "disemboweling",
435
+ "disfavour": "disfavor",
436
+ "dishevelled": "disheveled",
437
+ "dishonour": "dishonor",
438
+ "dishonourable": "dishonorable",
439
+ "dishonourably": "dishonorably",
440
+ "dishonoured": "dishonored",
441
+ "dishonouring": "dishonoring",
442
+ "dishonours": "dishonors",
443
+ "disorganisation": "disorganization",
444
+ "disorganised": "disorganized",
445
+ "distil": "distill",
446
+ "distils": "distills",
447
+ "dramatisation": "dramatization",
448
+ "dramatisations": "dramatizations",
449
+ "dramatise": "dramatize",
450
+ "dramatised": "dramatized",
451
+ "dramatises": "dramatizes",
452
+ "dramatising": "dramatizing",
453
+ "draught": "draft",
454
+ "draughtboard": "draftboard",
455
+ "draughtboards": "draftboards",
456
+ "draughtier": "draftier",
457
+ "draughtiest": "draftiest",
458
+ "draughts": "drafts",
459
+ "draughtsman": "draftsman",
460
+ "draughtsmanship": "draftsmanship",
461
+ "draughtsmen": "draftsmen",
462
+ "draughtswoman": "draftswoman",
463
+ "draughtswomen": "draftswomen",
464
+ "draughty": "drafty",
465
+ "drivelled": "driveled",
466
+ "drivelling": "driveling",
467
+ "duelled": "dueled",
468
+ "duelling": "dueling",
469
+ "economise": "economize",
470
+ "economised": "economized",
471
+ "economises": "economizes",
472
+ "economising": "economizing",
473
+ "editorialise": "editorialize",
474
+ "editorialised": "editorialized",
475
+ "editorialises": "editorializes",
476
+ "editorialising": "editorializing",
477
+ "edoema": "edema",
478
+ "empathise": "empathize",
479
+ "empathised": "empathized",
480
+ "empathises": "empathizes",
481
+ "empathising": "empathizing",
482
+ "emphasise": "emphasize",
483
+ "emphasised": "emphasized",
484
+ "emphasises": "emphasizes",
485
+ "emphasising": "emphasizing",
486
+ "enamelled": "enameled",
487
+ "enamelling": "enameling",
488
+ "enamoured": "enamored",
489
+ "encyclopaedia": "encyclopedia",
490
+ "encyclopaedias": "encyclopedias",
491
+ "encyclopaedic": "encyclopedic",
492
+ "endeavour": "endeavor",
493
+ "endeavoured": "endeavored",
494
+ "endeavouring": "endeavoring",
495
+ "endeavours": "endeavors",
496
+ "energise": "energize",
497
+ "energised": "energized",
498
+ "energises": "energizes",
499
+ "energising": "energizing",
500
+ "enrol": "enroll",
501
+ "enrols": "enrolls",
502
+ "enthral": "enthrall",
503
+ "enthrals": "enthralls",
504
+ "epaulette": "epaulet",
505
+ "epaulettes": "epaulets",
506
+ "epicentre": "epicenter",
507
+ "epicentres": "epicenters",
508
+ "epilogue": "epilog",
509
+ "epilogues": "epilogs",
510
+ "epitomise": "epitomize",
511
+ "epitomised": "epitomized",
512
+ "epitomises": "epitomizes",
513
+ "epitomising": "epitomizing",
514
+ "equalisation": "equalization",
515
+ "equalise": "equalize",
516
+ "equalised": "equalized",
517
+ "equaliser": "equalizer",
518
+ "equalisers": "equalizers",
519
+ "equalises": "equalizes",
520
+ "equalising": "equalizing",
521
+ "eulogise": "eulogize",
522
+ "eulogised": "eulogized",
523
+ "eulogises": "eulogizes",
524
+ "eulogising": "eulogizing",
525
+ "evangelise": "evangelize",
526
+ "evangelised": "evangelized",
527
+ "evangelises": "evangelizes",
528
+ "evangelising": "evangelizing",
529
+ "exorcise": "exorcize",
530
+ "exorcised": "exorcized",
531
+ "exorcises": "exorcizes",
532
+ "exorcising": "exorcizing",
533
+ "extemporisation": "extemporization",
534
+ "extemporise": "extemporize",
535
+ "extemporised": "extemporized",
536
+ "extemporises": "extemporizes",
537
+ "extemporising": "extemporizing",
538
+ "externalisation": "externalization",
539
+ "externalisations": "externalizations",
540
+ "externalise": "externalize",
541
+ "externalised": "externalized",
542
+ "externalises": "externalizes",
543
+ "externalising": "externalizing",
544
+ "factorise": "factorize",
545
+ "factorised": "factorized",
546
+ "factorises": "factorizes",
547
+ "factorising": "factorizing",
548
+ "faecal": "fecal",
549
+ "faeces": "feces",
550
+ "familiarisation": "familiarization",
551
+ "familiarise": "familiarize",
552
+ "familiarised": "familiarized",
553
+ "familiarises": "familiarizes",
554
+ "familiarising": "familiarizing",
555
+ "fantasise": "fantasize",
556
+ "fantasised": "fantasized",
557
+ "fantasises": "fantasizes",
558
+ "fantasising": "fantasizing",
559
+ "favour": "favor",
560
+ "favourable": "favorable",
561
+ "favourably": "favorably",
562
+ "favoured": "favored",
563
+ "favouring": "favoring",
564
+ "favourite": "favorite",
565
+ "favourites": "favorites",
566
+ "favouritism": "favoritism",
567
+ "favours": "favors",
568
+ "feminise": "feminize",
569
+ "feminised": "feminized",
570
+ "feminises": "feminizes",
571
+ "feminising": "feminizing",
572
+ "fertilisation": "fertilization",
573
+ "fertilise": "fertilize",
574
+ "fertilised": "fertilized",
575
+ "fertiliser": "fertilizer",
576
+ "fertilisers": "fertilizers",
577
+ "fertilises": "fertilizes",
578
+ "fertilising": "fertilizing",
579
+ "fervour": "fervor",
580
+ "fibre": "fiber",
581
+ "fibreglass": "fiberglass",
582
+ "fibres": "fibers",
583
+ "fictionalisation": "fictionalization",
584
+ "fictionalisations": "fictionalizations",
585
+ "fictionalise": "fictionalize",
586
+ "fictionalised": "fictionalized",
587
+ "fictionalises": "fictionalizes",
588
+ "fictionalising": "fictionalizing",
589
+ "fillet": "filet",
590
+ "filleted": "fileted",
591
+ "filleting": "fileting",
592
+ "fillets": "filets",
593
+ "finalisation": "finalization",
594
+ "finalise": "finalize",
595
+ "finalised": "finalized",
596
+ "finalises": "finalizes",
597
+ "finalising": "finalizing",
598
+ "flautist": "flutist",
599
+ "flautists": "flutists",
600
+ "flavour": "flavor",
601
+ "flavoured": "flavored",
602
+ "flavouring": "flavoring",
603
+ "flavourings": "flavorings",
604
+ "flavourless": "flavorless",
605
+ "flavours": "flavors",
606
+ "flavoursome": "flavorsome",
607
+ "flyer / flier": "flier / flyer",
608
+ "foetal": "fetal",
609
+ "foetid": "fetid",
610
+ "foetus": "fetus",
611
+ "foetuses": "fetuses",
612
+ "formalisation": "formalization",
613
+ "formalise": "formalize",
614
+ "formalised": "formalized",
615
+ "formalises": "formalizes",
616
+ "formalising": "formalizing",
617
+ "fossilisation": "fossilization",
618
+ "fossilise": "fossilize",
619
+ "fossilised": "fossilized",
620
+ "fossilises": "fossilizes",
621
+ "fossilising": "fossilizing",
622
+ "fraternisation": "fraternization",
623
+ "fraternise": "fraternize",
624
+ "fraternised": "fraternized",
625
+ "fraternises": "fraternizes",
626
+ "fraternising": "fraternizing",
627
+ "fulfil": "fulfill",
628
+ "fulfilment": "fulfillment",
629
+ "fulfils": "fulfills",
630
+ "funnelled": "funneled",
631
+ "funnelling": "funneling",
632
+ "gage": "gauge",
633
+ "gaged": "gauged",
634
+ "gages": "gauges",
635
+ "gaging": "gauging",
636
+ "galvanise": "galvanize",
637
+ "galvanised": "galvanized",
638
+ "galvanises": "galvanizes",
639
+ "galvanising": "galvanizing",
640
+ "gambolled": "gamboled",
641
+ "gambolling": "gamboling",
642
+ "gaol": "jail",
643
+ "gaolbird": "jailbird",
644
+ "gaolbirds": "jailbirds",
645
+ "gaolbreak": "jailbreak",
646
+ "gaolbreaks": "jailbreaks",
647
+ "gaoled": "jailed",
648
+ "gaoler": "jailer",
649
+ "gaolers": "jailers",
650
+ "gaoling": "jailing",
651
+ "gaols": "jails",
652
+ "gasses": "gases",
653
+ "generalisation": "generalization",
654
+ "generalisations": "generalizations",
655
+ "generalise": "generalize",
656
+ "generalised": "generalized",
657
+ "generalises": "generalizes",
658
+ "generalising": "generalizing",
659
+ "ghettoise": "ghettoize",
660
+ "ghettoised": "ghettoized",
661
+ "ghettoises": "ghettoizes",
662
+ "ghettoising": "ghettoizing",
663
+ "gipsies": "gypsies",
664
+ "glamor": "glamour",
665
+ "glamorise": "glamorize",
666
+ "glamorised": "glamorized",
667
+ "glamorises": "glamorizes",
668
+ "glamorising": "glamorizing",
669
+ "globalisation": "globalization",
670
+ "globalise": "globalize",
671
+ "globalised": "globalized",
672
+ "globalises": "globalizes",
673
+ "globalising": "globalizing",
674
+ "glueing": "gluing",
675
+ "goitre": "goiter",
676
+ "goitres": "goiters",
677
+ "gonorrhoea": "gonorrhea",
678
+ "gramme": "gram",
679
+ "grammes": "grams",
680
+ "gravelled": "graveled",
681
+ "grey": "gray",
682
+ "greyed": "grayed",
683
+ "greying": "graying",
684
+ "greyish": "grayish",
685
+ "greyness": "grayness",
686
+ "greys": "grays",
687
+ "grovelled": "groveled",
688
+ "grovelling": "groveling",
689
+ "groyne": "groin",
690
+ "groynes": "groins",
691
+ "gruelling": "grueling",
692
+ "gruellingly": "gruelingly",
693
+ "gryphon": "griffin",
694
+ "gryphons": "griffins",
695
+ "gynaecological": "gynecological",
696
+ "gynaecologist": "gynecologist",
697
+ "gynaecologists": "gynecologists",
698
+ "gynaecology": "gynecology",
699
+ "haematological": "hematological",
700
+ "haematologist": "hematologist",
701
+ "haematologists": "hematologists",
702
+ "haematology": "hematology",
703
+ "haemoglobin": "hemoglobin",
704
+ "haemophilia": "hemophilia",
705
+ "haemophiliac": "hemophiliac",
706
+ "haemophiliacs": "hemophiliacs",
707
+ "haemorrhage": "hemorrhage",
708
+ "haemorrhaged": "hemorrhaged",
709
+ "haemorrhages": "hemorrhages",
710
+ "haemorrhaging": "hemorrhaging",
711
+ "haemorrhoids": "hemorrhoids",
712
+ "harbour": "harbor",
713
+ "harboured": "harbored",
714
+ "harbouring": "harboring",
715
+ "harbours": "harbors",
716
+ "harmonisation": "harmonization",
717
+ "harmonise": "harmonize",
718
+ "harmonised": "harmonized",
719
+ "harmonises": "harmonizes",
720
+ "harmonising": "harmonizing",
721
+ "homoeopath": "homeopath",
722
+ "homoeopathic": "homeopathic",
723
+ "homoeopaths": "homeopaths",
724
+ "homoeopathy": "homeopathy",
725
+ "homogenise": "homogenize",
726
+ "homogenised": "homogenized",
727
+ "homogenises": "homogenizes",
728
+ "homogenising": "homogenizing",
729
+ "honour": "honor",
730
+ "honourable": "honorable",
731
+ "honourably": "honorably",
732
+ "honoured": "honored",
733
+ "honouring": "honoring",
734
+ "honours": "honors",
735
+ "hospitalisation": "hospitalization",
736
+ "hospitalise": "hospitalize",
737
+ "hospitalised": "hospitalized",
738
+ "hospitalises": "hospitalizes",
739
+ "hospitalising": "hospitalizing",
740
+ "humanise": "humanize",
741
+ "humanised": "humanized",
742
+ "humanises": "humanizes",
743
+ "humanising": "humanizing",
744
+ "humour": "humor",
745
+ "humoured": "humored",
746
+ "humouring": "humoring",
747
+ "humourless": "humorless",
748
+ "humours": "humors",
749
+ "hybridise": "hybridize",
750
+ "hybridised": "hybridized",
751
+ "hybridises": "hybridizes",
752
+ "hybridising": "hybridizing",
753
+ "hypnotise": "hypnotize",
754
+ "hypnotised": "hypnotized",
755
+ "hypnotises": "hypnotizes",
756
+ "hypnotising": "hypnotizing",
757
+ "hypothesise": "hypothesize",
758
+ "hypothesised": "hypothesized",
759
+ "hypothesises": "hypothesizes",
760
+ "hypothesising": "hypothesizing",
761
+ "idealisation": "idealization",
762
+ "idealise": "idealize",
763
+ "idealised": "idealized",
764
+ "idealises": "idealizes",
765
+ "idealising": "idealizing",
766
+ "idolise": "idolize",
767
+ "idolised": "idolized",
768
+ "idolises": "idolizes",
769
+ "idolising": "idolizing",
770
+ "immobilisation": "immobilization",
771
+ "immobilise": "immobilize",
772
+ "immobilised": "immobilized",
773
+ "immobiliser": "immobilizer",
774
+ "immobilisers": "immobilizers",
775
+ "immobilises": "immobilizes",
776
+ "immobilising": "immobilizing",
777
+ "immortalise": "immortalize",
778
+ "immortalised": "immortalized",
779
+ "immortalises": "immortalizes",
780
+ "immortalising": "immortalizing",
781
+ "immunisation": "immunization",
782
+ "immunise": "immunize",
783
+ "immunised": "immunized",
784
+ "immunises": "immunizes",
785
+ "immunising": "immunizing",
786
+ "impanelled": "impaneled",
787
+ "impanelling": "impaneling",
788
+ "imperilled": "imperiled",
789
+ "imperilling": "imperiling",
790
+ "individualise": "individualize",
791
+ "individualised": "individualized",
792
+ "individualises": "individualizes",
793
+ "individualising": "individualizing",
794
+ "industrialise": "industrialize",
795
+ "industrialised": "industrialized",
796
+ "industrialises": "industrializes",
797
+ "industrialising": "industrializing",
798
+ "inflexion": "inflection",
799
+ "inflexions": "inflections",
800
+ "initialise": "initialize",
801
+ "initialised": "initialized",
802
+ "initialises": "initializes",
803
+ "initialising": "initializing",
804
+ "initialled": "initialed",
805
+ "initialling": "initialing",
806
+ "instal": "install",
807
+ "instalment": "installment",
808
+ "instalments": "installments",
809
+ "instals": "installs",
810
+ "instil": "instill",
811
+ "instils": "instills",
812
+ "institutionalisation": "institutionalization",
813
+ "institutionalise": "institutionalize",
814
+ "institutionalised": "institutionalized",
815
+ "institutionalises": "institutionalizes",
816
+ "institutionalising": "institutionalizing",
817
+ "intellectualise": "intellectualize",
818
+ "intellectualised": "intellectualized",
819
+ "intellectualises": "intellectualizes",
820
+ "intellectualising": "intellectualizing",
821
+ "internalisation": "internalization",
822
+ "internalise": "internalize",
823
+ "internalised": "internalized",
824
+ "internalises": "internalizes",
825
+ "internalising": "internalizing",
826
+ "internationalisation": "internationalization",
827
+ "internationalise": "internationalize",
828
+ "internationalised": "internationalized",
829
+ "internationalises": "internationalizes",
830
+ "internationalising": "internationalizing",
831
+ "ionisation": "ionization",
832
+ "ionise": "ionize",
833
+ "ionised": "ionized",
834
+ "ioniser": "ionizer",
835
+ "ionisers": "ionizers",
836
+ "ionises": "ionizes",
837
+ "ionising": "ionizing",
838
+ "italicise": "italicize",
839
+ "italicised": "italicized",
840
+ "italicises": "italicizes",
841
+ "italicising": "italicizing",
842
+ "itemise": "itemize",
843
+ "itemised": "itemized",
844
+ "itemises": "itemizes",
845
+ "itemising": "itemizing",
846
+ "jeopardise": "jeopardize",
847
+ "jeopardised": "jeopardized",
848
+ "jeopardises": "jeopardizes",
849
+ "jeopardising": "jeopardizing",
850
+ "jewelled": "jeweled",
851
+ "jeweller": "jeweler",
852
+ "jewellers": "jewelers",
853
+ "jewellery": "jewelry",
854
+ "judgement": "judgment",
855
+ "kilogramme": "kilogram",
856
+ "kilogrammes": "kilograms",
857
+ "kilometre": "kilometer",
858
+ "kilometres": "kilometers",
859
+ "labelled": "labeled",
860
+ "labelling": "labeling",
861
+ "labour": "labor",
862
+ "laboured": "labored",
863
+ "labourer": "laborer",
864
+ "labourers": "laborers",
865
+ "labouring": "laboring",
866
+ "labours": "labors",
867
+ "lacklustre": "lackluster",
868
+ "legalisation": "legalization",
869
+ "legalise": "legalize",
870
+ "legalised": "legalized",
871
+ "legalises": "legalizes",
872
+ "legalising": "legalizing",
873
+ "legitimise": "legitimize",
874
+ "legitimised": "legitimized",
875
+ "legitimises": "legitimizes",
876
+ "legitimising": "legitimizing",
877
+ "leukaemia": "leukemia",
878
+ "levelled": "leveled",
879
+ "leveller": "leveler",
880
+ "levellers": "levelers",
881
+ "levelling": "leveling",
882
+ "libelled": "libeled",
883
+ "libelling": "libeling",
884
+ "libellous": "libelous",
885
+ "liberalisation": "liberalization",
886
+ "liberalise": "liberalize",
887
+ "liberalised": "liberalized",
888
+ "liberalises": "liberalizes",
889
+ "liberalising": "liberalizing",
890
+ "licence": "license",
891
+ "licenced": "licensed",
892
+ "licences": "licenses",
893
+ "licencing": "licensing",
894
+ "likeable": "likable",
895
+ "lionisation": "lionization",
896
+ "lionise": "lionize",
897
+ "lionised": "lionized",
898
+ "lionises": "lionizes",
899
+ "lionising": "lionizing",
900
+ "liquidise": "liquidize",
901
+ "liquidised": "liquidized",
902
+ "liquidiser": "liquidizer",
903
+ "liquidisers": "liquidizers",
904
+ "liquidises": "liquidizes",
905
+ "liquidising": "liquidizing",
906
+ "litre": "liter",
907
+ "litres": "liters",
908
+ "localise": "localize",
909
+ "localised": "localized",
910
+ "localises": "localizes",
911
+ "localising": "localizing",
912
+ "louvre": "louver",
913
+ "louvred": "louvered",
914
+ "louvres": "louvers",
915
+ "lustre": "luster",
916
+ "magnetise": "magnetize",
917
+ "magnetised": "magnetized",
918
+ "magnetises": "magnetizes",
919
+ "magnetising": "magnetizing",
920
+ "manoeuvrability": "maneuverability",
921
+ "manoeuvrable": "maneuverable",
922
+ "manoeuvre": "maneuver",
923
+ "manoeuvred": "maneuvered",
924
+ "manoeuvres": "maneuvers",
925
+ "manoeuvring": "maneuvering",
926
+ "manoeuvrings": "maneuverings",
927
+ "marginalisation": "marginalization",
928
+ "marginalise": "marginalize",
929
+ "marginalised": "marginalized",
930
+ "marginalises": "marginalizes",
931
+ "marginalising": "marginalizing",
932
+ "marshalled": "marshaled",
933
+ "marshalling": "marshaling",
934
+ "marvelled": "marveled",
935
+ "marvelling": "marveling",
936
+ "marvellous": "marvelous",
937
+ "marvellously": "marvelously",
938
+ "materialisation": "materialization",
939
+ "materialise": "materialize",
940
+ "materialised": "materialized",
941
+ "materialises": "materializes",
942
+ "materialising": "materializing",
943
+ "maximisation": "maximization",
944
+ "maximise": "maximize",
945
+ "maximised": "maximized",
946
+ "maximises": "maximizes",
947
+ "maximising": "maximizing",
948
+ "meagre": "meager",
949
+ "mechanisation": "mechanization",
950
+ "mechanise": "mechanize",
951
+ "mechanised": "mechanized",
952
+ "mechanises": "mechanizes",
953
+ "mechanising": "mechanizing",
954
+ "mediaeval": "medieval",
955
+ "memorialise": "memorialize",
956
+ "memorialised": "memorialized",
957
+ "memorialises": "memorializes",
958
+ "memorialising": "memorializing",
959
+ "memorise": "memorize",
960
+ "memorised": "memorized",
961
+ "memorises": "memorizes",
962
+ "memorising": "memorizing",
963
+ "mesmerise": "mesmerize",
964
+ "mesmerised": "mesmerized",
965
+ "mesmerises": "mesmerizes",
966
+ "mesmerising": "mesmerizing",
967
+ "metabolise": "metabolize",
968
+ "metabolised": "metabolized",
969
+ "metabolises": "metabolizes",
970
+ "metabolising": "metabolizing",
971
+ "metre": "meter",
972
+ "metres": "meters",
973
+ "mhm": "hmm",
974
+ "micrometre": "micrometer",
975
+ "micrometres": "micrometers",
976
+ "militarise": "militarize",
977
+ "militarised": "militarized",
978
+ "militarises": "militarizes",
979
+ "militarising": "militarizing",
980
+ "milligramme": "milligram",
981
+ "milligrammes": "milligrams",
982
+ "millilitre": "milliliter",
983
+ "millilitres": "milliliters",
984
+ "millimetre": "millimeter",
985
+ "millimetres": "millimeters",
986
+ "miniaturisation": "miniaturization",
987
+ "miniaturise": "miniaturize",
988
+ "miniaturised": "miniaturized",
989
+ "miniaturises": "miniaturizes",
990
+ "miniaturising": "miniaturizing",
991
+ "minibusses": "minibuses",
992
+ "minimise": "minimize",
993
+ "minimised": "minimized",
994
+ "minimises": "minimizes",
995
+ "minimising": "minimizing",
996
+ "misbehaviour": "misbehavior",
997
+ "misdemeanour": "misdemeanor",
998
+ "misdemeanours": "misdemeanors",
999
+ "misspelt": "misspelled",
1000
+ "mitre": "miter",
1001
+ "mitres": "miters",
1002
+ "mm": "hmm",
1003
+ "mmm": "hmm",
1004
+ "mobilisation": "mobilization",
1005
+ "mobilise": "mobilize",
1006
+ "mobilised": "mobilized",
1007
+ "mobilises": "mobilizes",
1008
+ "mobilising": "mobilizing",
1009
+ "modelled": "modeled",
1010
+ "modeller": "modeler",
1011
+ "modellers": "modelers",
1012
+ "modelling": "modeling",
1013
+ "modernise": "modernize",
1014
+ "modernised": "modernized",
1015
+ "modernises": "modernizes",
1016
+ "modernising": "modernizing",
1017
+ "moisturise": "moisturize",
1018
+ "moisturised": "moisturized",
1019
+ "moisturiser": "moisturizer",
1020
+ "moisturisers": "moisturizers",
1021
+ "moisturises": "moisturizes",
1022
+ "moisturising": "moisturizing",
1023
+ "monologue": "monolog",
1024
+ "monologues": "monologs",
1025
+ "monopolisation": "monopolization",
1026
+ "monopolise": "monopolize",
1027
+ "monopolised": "monopolized",
1028
+ "monopolises": "monopolizes",
1029
+ "monopolising": "monopolizing",
1030
+ "moralise": "moralize",
1031
+ "moralised": "moralized",
1032
+ "moralises": "moralizes",
1033
+ "moralising": "moralizing",
1034
+ "motorised": "motorized",
1035
+ "mould": "mold",
1036
+ "moulded": "molded",
1037
+ "moulder": "molder",
1038
+ "mouldered": "moldered",
1039
+ "mouldering": "moldering",
1040
+ "moulders": "molders",
1041
+ "mouldier": "moldier",
1042
+ "mouldiest": "moldiest",
1043
+ "moulding": "molding",
1044
+ "mouldings": "moldings",
1045
+ "moulds": "molds",
1046
+ "mouldy": "moldy",
1047
+ "moult": "molt",
1048
+ "moulted": "molted",
1049
+ "moulting": "molting",
1050
+ "moults": "molts",
1051
+ "moustache": "mustache",
1052
+ "moustached": "mustached",
1053
+ "moustaches": "mustaches",
1054
+ "moustachioed": "mustachioed",
1055
+ "multicoloured": "multicolored",
1056
+ "nationalisation": "nationalization",
1057
+ "nationalisations": "nationalizations",
1058
+ "nationalise": "nationalize",
1059
+ "nationalised": "nationalized",
1060
+ "nationalises": "nationalizes",
1061
+ "nationalising": "nationalizing",
1062
+ "naturalisation": "naturalization",
1063
+ "naturalise": "naturalize",
1064
+ "naturalised": "naturalized",
1065
+ "naturalises": "naturalizes",
1066
+ "naturalising": "naturalizing",
1067
+ "neighbour": "neighbor",
1068
+ "neighbourhood": "neighborhood",
1069
+ "neighbourhoods": "neighborhoods",
1070
+ "neighbouring": "neighboring",
1071
+ "neighbourliness": "neighborliness",
1072
+ "neighbourly": "neighborly",
1073
+ "neighbours": "neighbors",
1074
+ "neutralisation": "neutralization",
1075
+ "neutralise": "neutralize",
1076
+ "neutralised": "neutralized",
1077
+ "neutralises": "neutralizes",
1078
+ "neutralising": "neutralizing",
1079
+ "normalisation": "normalization",
1080
+ "normalise": "normalize",
1081
+ "normalised": "normalized",
1082
+ "normalises": "normalizes",
1083
+ "normalising": "normalizing",
1084
+ "odour": "odor",
1085
+ "odourless": "odorless",
1086
+ "odours": "odors",
1087
+ "oesophagus": "esophagus",
1088
+ "oesophaguses": "esophaguses",
1089
+ "oestrogen": "estrogen",
1090
+ "offence": "offense",
1091
+ "offences": "offenses",
1092
+ "omelette": "omelet",
1093
+ "omelettes": "omelets",
1094
+ "optimise": "optimize",
1095
+ "optimised": "optimized",
1096
+ "optimises": "optimizes",
1097
+ "optimising": "optimizing",
1098
+ "organisation": "organization",
1099
+ "organisational": "organizational",
1100
+ "organisations": "organizations",
1101
+ "organise": "organize",
1102
+ "organised": "organized",
1103
+ "organiser": "organizer",
1104
+ "organisers": "organizers",
1105
+ "organises": "organizes",
1106
+ "organising": "organizing",
1107
+ "orthopaedic": "orthopedic",
1108
+ "orthopaedics": "orthopedics",
1109
+ "ostracise": "ostracize",
1110
+ "ostracised": "ostracized",
1111
+ "ostracises": "ostracizes",
1112
+ "ostracising": "ostracizing",
1113
+ "outmanoeuvre": "outmaneuver",
1114
+ "outmanoeuvred": "outmaneuvered",
1115
+ "outmanoeuvres": "outmaneuvers",
1116
+ "outmanoeuvring": "outmaneuvering",
1117
+ "overemphasise": "overemphasize",
1118
+ "overemphasised": "overemphasized",
1119
+ "overemphasises": "overemphasizes",
1120
+ "overemphasising": "overemphasizing",
1121
+ "oxidisation": "oxidization",
1122
+ "oxidise": "oxidize",
1123
+ "oxidised": "oxidized",
1124
+ "oxidises": "oxidizes",
1125
+ "oxidising": "oxidizing",
1126
+ "paederast": "pederast",
1127
+ "paederasts": "pederasts",
1128
+ "paediatric": "pediatric",
1129
+ "paediatrician": "pediatrician",
1130
+ "paediatricians": "pediatricians",
1131
+ "paediatrics": "pediatrics",
1132
+ "paedophile": "pedophile",
1133
+ "paedophiles": "pedophiles",
1134
+ "paedophilia": "pedophilia",
1135
+ "palaeolithic": "paleolithic",
1136
+ "palaeontologist": "paleontologist",
1137
+ "palaeontologists": "paleontologists",
1138
+ "palaeontology": "paleontology",
1139
+ "panelled": "paneled",
1140
+ "panelling": "paneling",
1141
+ "panellist": "panelist",
1142
+ "panellists": "panelists",
1143
+ "paralyse": "paralyze",
1144
+ "paralysed": "paralyzed",
1145
+ "paralyses": "paralyzes",
1146
+ "paralysing": "paralyzing",
1147
+ "parcelled": "parceled",
1148
+ "parcelling": "parceling",
1149
+ "parlour": "parlor",
1150
+ "parlours": "parlors",
1151
+ "particularise": "particularize",
1152
+ "particularised": "particularized",
1153
+ "particularises": "particularizes",
1154
+ "particularising": "particularizing",
1155
+ "passivisation": "passivization",
1156
+ "passivise": "passivize",
1157
+ "passivised": "passivized",
1158
+ "passivises": "passivizes",
1159
+ "passivising": "passivizing",
1160
+ "pasteurisation": "pasteurization",
1161
+ "pasteurise": "pasteurize",
1162
+ "pasteurised": "pasteurized",
1163
+ "pasteurises": "pasteurizes",
1164
+ "pasteurising": "pasteurizing",
1165
+ "patronise": "patronize",
1166
+ "patronised": "patronized",
1167
+ "patronises": "patronizes",
1168
+ "patronising": "patronizing",
1169
+ "patronisingly": "patronizingly",
1170
+ "pedalled": "pedaled",
1171
+ "pedalling": "pedaling",
1172
+ "pedestrianisation": "pedestrianization",
1173
+ "pedestrianise": "pedestrianize",
1174
+ "pedestrianised": "pedestrianized",
1175
+ "pedestrianises": "pedestrianizes",
1176
+ "pedestrianising": "pedestrianizing",
1177
+ "penalise": "penalize",
1178
+ "penalised": "penalized",
1179
+ "penalises": "penalizes",
1180
+ "penalising": "penalizing",
1181
+ "pencilled": "penciled",
1182
+ "pencilling": "penciling",
1183
+ "personalise": "personalize",
1184
+ "personalised": "personalized",
1185
+ "personalises": "personalizes",
1186
+ "personalising": "personalizing",
1187
+ "pharmacopoeia": "pharmacopeia",
1188
+ "pharmacopoeias": "pharmacopeias",
1189
+ "philosophise": "philosophize",
1190
+ "philosophised": "philosophized",
1191
+ "philosophises": "philosophizes",
1192
+ "philosophising": "philosophizing",
1193
+ "philtre": "filter",
1194
+ "philtres": "filters",
1195
+ "phoney": "phony",
1196
+ "plagiarise": "plagiarize",
1197
+ "plagiarised": "plagiarized",
1198
+ "plagiarises": "plagiarizes",
1199
+ "plagiarising": "plagiarizing",
1200
+ "plough": "plow",
1201
+ "ploughed": "plowed",
1202
+ "ploughing": "plowing",
1203
+ "ploughman": "plowman",
1204
+ "ploughmen": "plowmen",
1205
+ "ploughs": "plows",
1206
+ "ploughshare": "plowshare",
1207
+ "ploughshares": "plowshares",
1208
+ "polarisation": "polarization",
1209
+ "polarise": "polarize",
1210
+ "polarised": "polarized",
1211
+ "polarises": "polarizes",
1212
+ "polarising": "polarizing",
1213
+ "politicisation": "politicization",
1214
+ "politicise": "politicize",
1215
+ "politicised": "politicized",
1216
+ "politicises": "politicizes",
1217
+ "politicising": "politicizing",
1218
+ "popularisation": "popularization",
1219
+ "popularise": "popularize",
1220
+ "popularised": "popularized",
1221
+ "popularises": "popularizes",
1222
+ "popularising": "popularizing",
1223
+ "pouffe": "pouf",
1224
+ "pouffes": "poufs",
1225
+ "practise": "practice",
1226
+ "practised": "practiced",
1227
+ "practises": "practices",
1228
+ "practising": "practicing",
1229
+ "praesidium": "presidium",
1230
+ "praesidiums": "presidiums",
1231
+ "pressurisation": "pressurization",
1232
+ "pressurise": "pressurize",
1233
+ "pressurised": "pressurized",
1234
+ "pressurises": "pressurizes",
1235
+ "pressurising": "pressurizing",
1236
+ "pretence": "pretense",
1237
+ "pretences": "pretenses",
1238
+ "primaeval": "primeval",
1239
+ "prioritisation": "prioritization",
1240
+ "prioritise": "prioritize",
1241
+ "prioritised": "prioritized",
1242
+ "prioritises": "prioritizes",
1243
+ "prioritising": "prioritizing",
1244
+ "privatisation": "privatization",
1245
+ "privatisations": "privatizations",
1246
+ "privatise": "privatize",
1247
+ "privatised": "privatized",
1248
+ "privatises": "privatizes",
1249
+ "privatising": "privatizing",
1250
+ "professionalisation": "professionalization",
1251
+ "professionalise": "professionalize",
1252
+ "professionalised": "professionalized",
1253
+ "professionalises": "professionalizes",
1254
+ "professionalising": "professionalizing",
1255
+ "programme": "program",
1256
+ "programmes": "programs",
1257
+ "prologue": "prolog",
1258
+ "prologues": "prologs",
1259
+ "propagandise": "propagandize",
1260
+ "propagandised": "propagandized",
1261
+ "propagandises": "propagandizes",
1262
+ "propagandising": "propagandizing",
1263
+ "proselytise": "proselytize",
1264
+ "proselytised": "proselytized",
1265
+ "proselytiser": "proselytizer",
1266
+ "proselytisers": "proselytizers",
1267
+ "proselytises": "proselytizes",
1268
+ "proselytising": "proselytizing",
1269
+ "psychoanalyse": "psychoanalyze",
1270
+ "psychoanalysed": "psychoanalyzed",
1271
+ "psychoanalyses": "psychoanalyzes",
1272
+ "psychoanalysing": "psychoanalyzing",
1273
+ "publicise": "publicize",
1274
+ "publicised": "publicized",
1275
+ "publicises": "publicizes",
1276
+ "publicising": "publicizing",
1277
+ "pulverisation": "pulverization",
1278
+ "pulverise": "pulverize",
1279
+ "pulverised": "pulverized",
1280
+ "pulverises": "pulverizes",
1281
+ "pulverising": "pulverizing",
1282
+ "pummelled": "pummel",
1283
+ "pummelling": "pummeled",
1284
+ "pyjama": "pajama",
1285
+ "pyjamas": "pajamas",
1286
+ "pzazz": "pizzazz",
1287
+ "quarrelled": "quarreled",
1288
+ "quarrelling": "quarreling",
1289
+ "radicalise": "radicalize",
1290
+ "radicalised": "radicalized",
1291
+ "radicalises": "radicalizes",
1292
+ "radicalising": "radicalizing",
1293
+ "rancour": "rancor",
1294
+ "randomise": "randomize",
1295
+ "randomised": "randomized",
1296
+ "randomises": "randomizes",
1297
+ "randomising": "randomizing",
1298
+ "rationalisation": "rationalization",
1299
+ "rationalisations": "rationalizations",
1300
+ "rationalise": "rationalize",
1301
+ "rationalised": "rationalized",
1302
+ "rationalises": "rationalizes",
1303
+ "rationalising": "rationalizing",
1304
+ "ravelled": "raveled",
1305
+ "ravelling": "raveling",
1306
+ "realisable": "realizable",
1307
+ "realisation": "realization",
1308
+ "realisations": "realizations",
1309
+ "realise": "realize",
1310
+ "realised": "realized",
1311
+ "realises": "realizes",
1312
+ "realising": "realizing",
1313
+ "recognisable": "recognizable",
1314
+ "recognisably": "recognizably",
1315
+ "recognisance": "recognizance",
1316
+ "recognise": "recognize",
1317
+ "recognised": "recognized",
1318
+ "recognises": "recognizes",
1319
+ "recognising": "recognizing",
1320
+ "reconnoitre": "reconnoiter",
1321
+ "reconnoitred": "reconnoitered",
1322
+ "reconnoitres": "reconnoiters",
1323
+ "reconnoitring": "reconnoitering",
1324
+ "refuelled": "refueled",
1325
+ "refuelling": "refueling",
1326
+ "regularisation": "regularization",
1327
+ "regularise": "regularize",
1328
+ "regularised": "regularized",
1329
+ "regularises": "regularizes",
1330
+ "regularising": "regularizing",
1331
+ "remodelled": "remodeled",
1332
+ "remodelling": "remodeling",
1333
+ "remould": "remold",
1334
+ "remoulded": "remolded",
1335
+ "remoulding": "remolding",
1336
+ "remoulds": "remolds",
1337
+ "reorganisation": "reorganization",
1338
+ "reorganisations": "reorganizations",
1339
+ "reorganise": "reorganize",
1340
+ "reorganised": "reorganized",
1341
+ "reorganises": "reorganizes",
1342
+ "reorganising": "reorganizing",
1343
+ "revelled": "reveled",
1344
+ "reveller": "reveler",
1345
+ "revellers": "revelers",
1346
+ "revelling": "reveling",
1347
+ "revitalise": "revitalize",
1348
+ "revitalised": "revitalized",
1349
+ "revitalises": "revitalizes",
1350
+ "revitalising": "revitalizing",
1351
+ "revolutionise": "revolutionize",
1352
+ "revolutionised": "revolutionized",
1353
+ "revolutionises": "revolutionizes",
1354
+ "revolutionising": "revolutionizing",
1355
+ "rhapsodise": "rhapsodize",
1356
+ "rhapsodised": "rhapsodized",
1357
+ "rhapsodises": "rhapsodizes",
1358
+ "rhapsodising": "rhapsodizing",
1359
+ "rigour": "rigor",
1360
+ "rigours": "rigors",
1361
+ "ritualised": "ritualized",
1362
+ "rivalled": "rivaled",
1363
+ "rivalling": "rivaling",
1364
+ "romanticise": "romanticize",
1365
+ "romanticised": "romanticized",
1366
+ "romanticises": "romanticizes",
1367
+ "romanticising": "romanticizing",
1368
+ "rumour": "rumor",
1369
+ "rumoured": "rumored",
1370
+ "rumours": "rumors",
1371
+ "sabre": "saber",
1372
+ "sabres": "sabers",
1373
+ "saltpetre": "saltpeter",
1374
+ "sanitise": "sanitize",
1375
+ "sanitised": "sanitized",
1376
+ "sanitises": "sanitizes",
1377
+ "sanitising": "sanitizing",
1378
+ "satirise": "satirize",
1379
+ "satirised": "satirized",
1380
+ "satirises": "satirizes",
1381
+ "satirising": "satirizing",
1382
+ "saviour": "savior",
1383
+ "saviours": "saviors",
1384
+ "savour": "savor",
1385
+ "savoured": "savored",
1386
+ "savouries": "savories",
1387
+ "savouring": "savoring",
1388
+ "savours": "savors",
1389
+ "savoury": "savory",
1390
+ "scandalise": "scandalize",
1391
+ "scandalised": "scandalized",
1392
+ "scandalises": "scandalizes",
1393
+ "scandalising": "scandalizing",
1394
+ "sceptic": "skeptic",
1395
+ "sceptical": "skeptical",
1396
+ "sceptically": "skeptically",
1397
+ "scepticism": "skepticism",
1398
+ "sceptics": "skeptics",
1399
+ "sceptre": "scepter",
1400
+ "sceptres": "scepters",
1401
+ "scrutinise": "scrutinize",
1402
+ "scrutinised": "scrutinized",
1403
+ "scrutinises": "scrutinizes",
1404
+ "scrutinising": "scrutinizing",
1405
+ "secularisation": "secularization",
1406
+ "secularise": "secularize",
1407
+ "secularised": "secularized",
1408
+ "secularises": "secularizes",
1409
+ "secularising": "secularizing",
1410
+ "sensationalise": "sensationalize",
1411
+ "sensationalised": "sensationalized",
1412
+ "sensationalises": "sensationalizes",
1413
+ "sensationalising": "sensationalizing",
1414
+ "sensitise": "sensitize",
1415
+ "sensitised": "sensitized",
1416
+ "sensitises": "sensitizes",
1417
+ "sensitising": "sensitizing",
1418
+ "sentimentalise": "sentimentalize",
1419
+ "sentimentalised": "sentimentalized",
1420
+ "sentimentalises": "sentimentalizes",
1421
+ "sentimentalising": "sentimentalizing",
1422
+ "sepulchre": "sepulcher",
1423
+ "sepulchres": "sepulchers",
1424
+ "serialisation": "serialization",
1425
+ "serialisations": "serializations",
1426
+ "serialise": "serialize",
1427
+ "serialised": "serialized",
1428
+ "serialises": "serializes",
1429
+ "serialising": "serializing",
1430
+ "sermonise": "sermonize",
1431
+ "sermonised": "sermonized",
1432
+ "sermonises": "sermonizes",
1433
+ "sermonising": "sermonizing",
1434
+ "sheikh": "sheik",
1435
+ "shovelled": "shoveled",
1436
+ "shovelling": "shoveling",
1437
+ "shrivelled": "shriveled",
1438
+ "shrivelling": "shriveling",
1439
+ "signalise": "signalize",
1440
+ "signalised": "signalized",
1441
+ "signalises": "signalizes",
1442
+ "signalising": "signalizing",
1443
+ "signalled": "signaled",
1444
+ "signalling": "signaling",
1445
+ "smoulder": "smolder",
1446
+ "smouldered": "smoldered",
1447
+ "smouldering": "smoldering",
1448
+ "smoulders": "smolders",
1449
+ "snivelled": "sniveled",
1450
+ "snivelling": "sniveling",
1451
+ "snorkelled": "snorkeled",
1452
+ "snorkelling": "snorkeling",
1453
+ "snowplough": "snowplow",
1454
+ "snowploughs": "snowplow",
1455
+ "socialisation": "socialization",
1456
+ "socialise": "socialize",
1457
+ "socialised": "socialized",
1458
+ "socialises": "socializes",
1459
+ "socialising": "socializing",
1460
+ "sodomise": "sodomize",
1461
+ "sodomised": "sodomized",
1462
+ "sodomises": "sodomizes",
1463
+ "sodomising": "sodomizing",
1464
+ "solemnise": "solemnize",
1465
+ "solemnised": "solemnized",
1466
+ "solemnises": "solemnizes",
1467
+ "solemnising": "solemnizing",
1468
+ "sombre": "somber",
1469
+ "specialisation": "specialization",
1470
+ "specialisations": "specializations",
1471
+ "specialise": "specialize",
1472
+ "specialised": "specialized",
1473
+ "specialises": "specializes",
1474
+ "specialising": "specializing",
1475
+ "spectre": "specter",
1476
+ "spectres": "specters",
1477
+ "spiralled": "spiraled",
1478
+ "spiralling": "spiraling",
1479
+ "splendour": "splendor",
1480
+ "splendours": "splendors",
1481
+ "squirrelled": "squirreled",
1482
+ "squirrelling": "squirreling",
1483
+ "stabilisation": "stabilization",
1484
+ "stabilise": "stabilize",
1485
+ "stabilised": "stabilized",
1486
+ "stabiliser": "stabilizer",
1487
+ "stabilisers": "stabilizers",
1488
+ "stabilises": "stabilizes",
1489
+ "stabilising": "stabilizing",
1490
+ "standardisation": "standardization",
1491
+ "standardise": "standardize",
1492
+ "standardised": "standardized",
1493
+ "standardises": "standardizes",
1494
+ "standardising": "standardizing",
1495
+ "stencilled": "stenciled",
1496
+ "stencilling": "stenciling",
1497
+ "sterilisation": "sterilization",
1498
+ "sterilisations": "sterilizations",
1499
+ "sterilise": "sterilize",
1500
+ "sterilised": "sterilized",
1501
+ "steriliser": "sterilizer",
1502
+ "sterilisers": "sterilizers",
1503
+ "sterilises": "sterilizes",
1504
+ "sterilising": "sterilizing",
1505
+ "stigmatisation": "stigmatization",
1506
+ "stigmatise": "stigmatize",
1507
+ "stigmatised": "stigmatized",
1508
+ "stigmatises": "stigmatizes",
1509
+ "stigmatising": "stigmatizing",
1510
+ "storey": "story",
1511
+ "storeys": "stories",
1512
+ "subsidisation": "subsidization",
1513
+ "subsidise": "subsidize",
1514
+ "subsidised": "subsidized",
1515
+ "subsidiser": "subsidizer",
1516
+ "subsidisers": "subsidizers",
1517
+ "subsidises": "subsidizes",
1518
+ "subsidising": "subsidizing",
1519
+ "succour": "succor",
1520
+ "succoured": "succored",
1521
+ "succouring": "succoring",
1522
+ "succours": "succors",
1523
+ "sulphate": "sulfate",
1524
+ "sulphates": "sulfates",
1525
+ "sulphide": "sulfide",
1526
+ "sulphides": "sulfides",
1527
+ "sulphur": "sulfur",
1528
+ "sulphurous": "sulfurous",
1529
+ "summarise": "summarize",
1530
+ "summarised": "summarized",
1531
+ "summarises": "summarizes",
1532
+ "summarising": "summarizing",
1533
+ "swivelled": "swiveled",
1534
+ "swivelling": "swiveling",
1535
+ "symbolise": "symbolize",
1536
+ "symbolised": "symbolized",
1537
+ "symbolises": "symbolizes",
1538
+ "symbolising": "symbolizing",
1539
+ "sympathise": "sympathize",
1540
+ "sympathised": "sympathized",
1541
+ "sympathiser": "sympathizer",
1542
+ "sympathisers": "sympathizers",
1543
+ "sympathises": "sympathizes",
1544
+ "sympathising": "sympathizing",
1545
+ "synchronisation": "synchronization",
1546
+ "synchronise": "synchronize",
1547
+ "synchronised": "synchronized",
1548
+ "synchronises": "synchronizes",
1549
+ "synchronising": "synchronizing",
1550
+ "synthesise": "synthesize",
1551
+ "synthesised": "synthesized",
1552
+ "synthesiser": "synthesizer",
1553
+ "synthesisers": "synthesizers",
1554
+ "synthesises": "synthesizes",
1555
+ "synthesising": "synthesizing",
1556
+ "syphon": "siphon",
1557
+ "syphoned": "siphoned",
1558
+ "syphoning": "siphoning",
1559
+ "syphons": "siphons",
1560
+ "systematisation": "systematization",
1561
+ "systematise": "systematize",
1562
+ "systematised": "systematized",
1563
+ "systematises": "systematizes",
1564
+ "systematising": "systematizing",
1565
+ "tantalise": "tantalize",
1566
+ "tantalised": "tantalized",
1567
+ "tantalises": "tantalizes",
1568
+ "tantalising": "tantalizing",
1569
+ "tantalisingly": "tantalizingly",
1570
+ "tasselled": "tasseled",
1571
+ "technicolour": "technicolor",
1572
+ "temporise": "temporize",
1573
+ "temporised": "temporized",
1574
+ "temporises": "temporizes",
1575
+ "temporising": "temporizing",
1576
+ "tenderise": "tenderize",
1577
+ "tenderised": "tenderized",
1578
+ "tenderises": "tenderizes",
1579
+ "tenderising": "tenderizing",
1580
+ "terrorise": "terrorize",
1581
+ "terrorised": "terrorized",
1582
+ "terrorises": "terrorizes",
1583
+ "terrorising": "terrorizing",
1584
+ "theatre": "theater",
1585
+ "theatregoer": "theatergoer",
1586
+ "theatregoers": "theatergoers",
1587
+ "theatres": "theaters",
1588
+ "theorise": "theorize",
1589
+ "theorised": "theorized",
1590
+ "theorises": "theorizes",
1591
+ "theorising": "theorizing",
1592
+ "tonne": "ton",
1593
+ "tonnes": "tons",
1594
+ "towelled": "toweled",
1595
+ "towelling": "toweling",
1596
+ "toxaemia": "toxemia",
1597
+ "tranquillise": "tranquilize",
1598
+ "tranquillised": "tranquilized",
1599
+ "tranquilliser": "tranquilizer",
1600
+ "tranquillisers": "tranquilizers",
1601
+ "tranquillises": "tranquilizes",
1602
+ "tranquillising": "tranquilizing",
1603
+ "tranquillity": "tranquility",
1604
+ "tranquillize": "tranquilize",
1605
+ "tranquillized": "tranquilized",
1606
+ "tranquillizer": "tranquilizer",
1607
+ "tranquillizers": "tranquilizers",
1608
+ "tranquillizes": "tranquilizes",
1609
+ "tranquillizing": "tranquilizing",
1610
+ "tranquilly": "tranquility",
1611
+ "transistorised": "transistorized",
1612
+ "traumatise": "traumatize",
1613
+ "traumatised": "traumatized",
1614
+ "traumatises": "traumatizes",
1615
+ "traumatising": "traumatizing",
1616
+ "travelled": "traveled",
1617
+ "traveller": "traveler",
1618
+ "travellers": "travelers",
1619
+ "travelling": "traveling",
1620
+ "travelog": "travelogue",
1621
+ "travelogs": "travelogues",
1622
+ "trialled": "trialed",
1623
+ "trialling": "trialing",
1624
+ "tricolour": "tricolor",
1625
+ "tricolours": "tricolors",
1626
+ "trivialise": "trivialize",
1627
+ "trivialised": "trivialized",
1628
+ "trivialises": "trivializes",
1629
+ "trivialising": "trivializing",
1630
+ "tumour": "tumor",
1631
+ "tumours": "tumors",
1632
+ "tunnelled": "tunneled",
1633
+ "tunnelling": "tunneling",
1634
+ "tyrannise": "tyrannize",
1635
+ "tyrannised": "tyrannized",
1636
+ "tyrannises": "tyrannizes",
1637
+ "tyrannising": "tyrannizing",
1638
+ "tyre": "tire",
1639
+ "tyres": "tires",
1640
+ "unauthorised": "unauthorized",
1641
+ "uncivilised": "uncivilized",
1642
+ "underutilised": "underutilized",
1643
+ "unequalled": "unequaled",
1644
+ "unfavourable": "unfavorable",
1645
+ "unfavourably": "unfavorably",
1646
+ "unionisation": "unionization",
1647
+ "unionise": "unionize",
1648
+ "unionised": "unionized",
1649
+ "unionises": "unionizes",
1650
+ "unionising": "unionizing",
1651
+ "unorganised": "unorganized",
1652
+ "unravelled": "unraveled",
1653
+ "unravelling": "unraveling",
1654
+ "unrecognisable": "unrecognizable",
1655
+ "unrecognised": "unrecognized",
1656
+ "unrivalled": "unrivaled",
1657
+ "unsavoury": "unsavory",
1658
+ "untrammelled": "untrammeled",
1659
+ "urbanisation": "urbanization",
1660
+ "urbanise": "urbanize",
1661
+ "urbanised": "urbanized",
1662
+ "urbanises": "urbanizes",
1663
+ "urbanising": "urbanizing",
1664
+ "utilisable": "utilizable",
1665
+ "utilisation": "utilization",
1666
+ "utilise": "utilize",
1667
+ "utilised": "utilized",
1668
+ "utilises": "utilizes",
1669
+ "utilising": "utilizing",
1670
+ "valour": "valor",
1671
+ "vandalise": "vandalize",
1672
+ "vandalised": "vandalized",
1673
+ "vandalises": "vandalizes",
1674
+ "vandalising": "vandalizing",
1675
+ "vaporisation": "vaporization",
1676
+ "vaporise": "vaporize",
1677
+ "vaporised": "vaporized",
1678
+ "vaporises": "vaporizes",
1679
+ "vaporising": "vaporizing",
1680
+ "vapour": "vapor",
1681
+ "vapours": "vapors",
1682
+ "verbalise": "verbalize",
1683
+ "verbalised": "verbalized",
1684
+ "verbalises": "verbalizes",
1685
+ "verbalising": "verbalizing",
1686
+ "victimisation": "victimization",
1687
+ "victimise": "victimize",
1688
+ "victimised": "victimized",
1689
+ "victimises": "victimizes",
1690
+ "victimising": "victimizing",
1691
+ "videodisc": "videodisk",
1692
+ "videodiscs": "videodisks",
1693
+ "vigour": "vigor",
1694
+ "visualisation": "visualization",
1695
+ "visualisations": "visualizations",
1696
+ "visualise": "visualize",
1697
+ "visualised": "visualized",
1698
+ "visualises": "visualizes",
1699
+ "visualising": "visualizing",
1700
+ "vocalisation": "vocalization",
1701
+ "vocalisations": "vocalizations",
1702
+ "vocalise": "vocalize",
1703
+ "vocalised": "vocalized",
1704
+ "vocalises": "vocalizes",
1705
+ "vocalising": "vocalizing",
1706
+ "vulcanised": "vulcanized",
1707
+ "vulgarisation": "vulgarization",
1708
+ "vulgarise": "vulgarize",
1709
+ "vulgarised": "vulgarized",
1710
+ "vulgarises": "vulgarizes",
1711
+ "vulgarising": "vulgarizing",
1712
+ "waggon": "wagon",
1713
+ "waggons": "wagons",
1714
+ "watercolour": "watercolor",
1715
+ "watercolours": "watercolors",
1716
+ "weaselled": "weaseled",
1717
+ "weaselling": "weaseling",
1718
+ "westernisation": "westernization",
1719
+ "westernise": "westernize",
1720
+ "westernised": "westernized",
1721
+ "westernises": "westernizes",
1722
+ "westernising": "westernizing",
1723
+ "womanise": "womanize",
1724
+ "womanised": "womanized",
1725
+ "womaniser": "womanizer",
1726
+ "womanisers": "womanizers",
1727
+ "womanises": "womanizes",
1728
+ "womanising": "womanizing",
1729
+ "woollen": "woolen",
1730
+ "woollens": "woolens",
1731
+ "woollies": "woolies",
1732
+ "woolly": "wooly",
1733
+ "worshipped": "worshiped",
1734
+ "worshipper": "worshiper",
1735
+ "worshipping": "worshiping",
1736
+ "yodelled": "yodeled",
1737
+ "yodelling": "yodeling",
1738
+ "yoghourt": "yogurt",
1739
+ "yoghourts": "yogurts",
1740
+ "yoghurt": "yogurt",
1741
+ "yoghurts": "yogurts"
1742
+ }
~/.cache/models--openai--whisper-medium/blobs/f84be5dbc1bfd09035c3fd3e01b777bc47f14a66 ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "openai/whisper-medium",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "gelu",
5
+ "architectures": [
6
+ "WhisperForConditionalGeneration"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "begin_suppress_tokens": [
10
+ 220,
11
+ 50257
12
+ ],
13
+ "bos_token_id": 50257,
14
+ "d_model": 1024,
15
+ "decoder_attention_heads": 16,
16
+ "decoder_ffn_dim": 4096,
17
+ "decoder_layerdrop": 0.0,
18
+ "decoder_layers": 24,
19
+ "decoder_start_token_id": 50258,
20
+ "dropout": 0.0,
21
+ "encoder_attention_heads": 16,
22
+ "encoder_ffn_dim": 4096,
23
+ "encoder_layerdrop": 0.0,
24
+ "encoder_layers": 24,
25
+ "eos_token_id": 50257,
26
+ "forced_decoder_ids": [
27
+ [
28
+ 1,
29
+ 50259
30
+ ],
31
+ [
32
+ 2,
33
+ 50359
34
+ ],
35
+ [
36
+ 3,
37
+ 50363
38
+ ]
39
+ ],
40
+ "init_std": 0.02,
41
+ "is_encoder_decoder": true,
42
+ "max_length": 448,
43
+ "max_source_positions": 1500,
44
+ "max_target_positions": 448,
45
+ "model_type": "whisper",
46
+ "num_hidden_layers": 24,
47
+ "num_mel_bins": 80,
48
+ "pad_token_id": 50257,
49
+ "scale_embedding": false,
50
+ "suppress_tokens": [
51
+ 1,
52
+ 2,
53
+ 7,
54
+ 8,
55
+ 9,
56
+ 10,
57
+ 14,
58
+ 25,
59
+ 26,
60
+ 27,
61
+ 28,
62
+ 29,
63
+ 31,
64
+ 58,
65
+ 59,
66
+ 60,
67
+ 61,
68
+ 62,
69
+ 63,
70
+ 90,
71
+ 91,
72
+ 92,
73
+ 93,
74
+ 359,
75
+ 503,
76
+ 522,
77
+ 542,
78
+ 873,
79
+ 893,
80
+ 902,
81
+ 918,
82
+ 922,
83
+ 931,
84
+ 1350,
85
+ 1853,
86
+ 1982,
87
+ 2460,
88
+ 2627,
89
+ 3246,
90
+ 3253,
91
+ 3268,
92
+ 3536,
93
+ 3846,
94
+ 3961,
95
+ 4183,
96
+ 4667,
97
+ 6585,
98
+ 6647,
99
+ 7273,
100
+ 9061,
101
+ 9383,
102
+ 10428,
103
+ 10929,
104
+ 11938,
105
+ 12033,
106
+ 12331,
107
+ 12562,
108
+ 13793,
109
+ 14157,
110
+ 14635,
111
+ 15265,
112
+ 15618,
113
+ 16553,
114
+ 16604,
115
+ 18362,
116
+ 18956,
117
+ 20075,
118
+ 21675,
119
+ 22520,
120
+ 26130,
121
+ 26161,
122
+ 26435,
123
+ 28279,
124
+ 29464,
125
+ 31650,
126
+ 32302,
127
+ 32470,
128
+ 36865,
129
+ 42863,
130
+ 47425,
131
+ 49870,
132
+ 50254,
133
+ 50258,
134
+ 50360,
135
+ 50361,
136
+ 50362
137
+ ],
138
+ "torch_dtype": "float32",
139
+ "transformers_version": "4.24.0.dev0",
140
+ "use_cache": true,
141
+ "vocab_size": 51865
142
+ }
~/.cache/models--openai--whisper-medium/refs/main ADDED
@@ -0,0 +1 @@
 
 
1
+ a0b3589e1034234495a1b696c28d4832cdaf8a32
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/added_tokens.json ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../blobs/47e9dd31523ecea227504afad3870da1cfe5ad81
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../blobs/f84be5dbc1bfd09035c3fd3e01b777bc47f14a66
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/merges.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../blobs/3a00c89ee5e8ae0cb159a6ec838843fb2266fac6
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/normalizer.json ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../blobs/dd6ae819ad738ac1a546e9f9282ef325c33b9ea0
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/preprocessor_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../blobs/c2048dfa9fd94a052e62e908d2c4dfb18534b4d2
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30a744be592bdbc567915370b1a83438bb77847eaf0443926d1966d4b857a237
3
+ size 76
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../blobs/9115b6806f75d5122486b0e1ae0279a0207199c2
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../blobs/5e6c8377adf6019428b34a1ad906fb43de71d387
~/.cache/models--openai--whisper-medium/snapshots/a0b3589e1034234495a1b696c28d4832cdaf8a32/vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../blobs/0f3456460629e21d559c6daa23ab6ce3644e8271
~/.cache/tmp22vcmo7s ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:acdef7fc4af7d3cc2cc34ae300364b3d385544f77a90f165b420c38c846e1c41
3
+ size 1246900224