versae committed on
Commit
dbdf429
·
2 Parent(s): d6b93b3 790cc21

Merge branch 'main' of https://huggingface.co/NbAiLab/whisper-flaxtest into main

Browse files
Files changed (39) hide show
  1. backup_run_flax_speech_recognition_seq2seq.py +0 -880
  2. backup_run_flax_speech_recognition_seq2seq_streaming.py +0 -874
  3. run.sh +5 -4
  4. run_flax_speech_recognition_seq2seq.py +0 -2
  5. run_flax_speech_recognition_seq2seq_streaming.py +69 -83
  6. run_test.sh → run_streaming.sh +5 -4
  7. whisper-small-flaxtest/added_tokens.json +108 -0
  8. whisper-small-flaxtest/config.json +143 -0
  9. whisper-small-flaxtest/events.out.tfevents.1677611724.t1v-n-d163ce9a-w-0.1583171.0.v2 +3 -0
  10. whisper-small-flaxtest/events.out.tfevents.1677613551.t1v-n-d163ce9a-w-0.1702844.0.v2 +3 -0
  11. whisper-small-flaxtest/events.out.tfevents.1677613844.t1v-n-d163ce9a-w-0.1706687.0.v2 +3 -0
  12. whisper-small-flaxtest/events.out.tfevents.1677614511.t1v-n-d163ce9a-w-0.1904376.0.v2 +3 -0
  13. whisper-small-flaxtest/events.out.tfevents.1677615119.t1v-n-d163ce9a-w-0.2101561.0.v2 +3 -0
  14. whisper-small-flaxtest/events.out.tfevents.1677615611.t1v-n-d163ce9a-w-0.2298739.0.v2 +3 -0
  15. whisper-small-flaxtest/merges.txt +0 -0
  16. whisper-small-flaxtest/normalizer.json +1742 -0
  17. whisper-small-flaxtest/preprocessor_config.json +14 -0
  18. whisper-small-flaxtest/special_tokens_map.json +114 -0
  19. whisper-small-flaxtest/tokenizer.json +0 -0
  20. whisper-small-flaxtest/tokenizer_config.json +35 -0
  21. whisper-small-flaxtest/vocab.json +0 -0
  22. whisper-tiny-ft-dummy/events.out.tfevents.1677582573.t1v-n-d163ce9a-w-0.1276805.0.v2 +3 -0
  23. whisper-tiny-ft-dummy/events.out.tfevents.1677585137.t1v-n-d163ce9a-w-0.1284051.0.v2 +3 -0
  24. whisper-tiny-ft-dummy/events.out.tfevents.1677587059.t1v-n-d163ce9a-w-0.1287692.0.v2 +3 -0
  25. whisper-tiny-ft-dummy/events.out.tfevents.1677587350.t1v-n-d163ce9a-w-0.1292303.0.v2 +3 -0
  26. whisper-tiny-ft-dummy/events.out.tfevents.1677588068.t1v-n-d163ce9a-w-0.1297330.0.v2 +3 -0
  27. whisper-tiny-ft-dummy/events.out.tfevents.1677588142.t1v-n-d163ce9a-w-0.1301760.0.v2 +3 -0
  28. whisper-tiny-ft-dummy/events.out.tfevents.1677588581.t1v-n-d163ce9a-w-0.1306471.0.v2 +3 -0
  29. whisper-tiny-ft-dummy/events.out.tfevents.1677590425.t1v-n-d163ce9a-w-0.1318486.0.v2 +3 -0
  30. whisper-tiny-ft-dummy/events.out.tfevents.1677590675.t1v-n-d163ce9a-w-0.1323104.0.v2 +3 -0
  31. whisper-tiny-ft-dummy/events.out.tfevents.1677591418.t1v-n-d163ce9a-w-0.1328351.0.v2 +3 -0
  32. whisper-tiny-ft-dummy/events.out.tfevents.1677591678.t1v-n-d163ce9a-w-0.1333009.0.v2 +3 -0
  33. whisper-tiny-ft-dummy/events.out.tfevents.1677591869.t1v-n-d163ce9a-w-0.1337579.0.v2 +3 -0
  34. whisper-tiny-ft-dummy/events.out.tfevents.1677592021.t1v-n-d163ce9a-w-0.1344023.0.v2 +3 -0
  35. whisper-tiny-ft-dummy/events.out.tfevents.1677592180.t1v-n-d163ce9a-w-0.1350466.0.v2 +3 -0
  36. whisper-tiny-ft-dummy/events.out.tfevents.1677592712.t1v-n-d163ce9a-w-0.1355445.0.v2 +3 -0
  37. whisper-tiny-ft-dummy/events.out.tfevents.1677596196.t1v-n-d163ce9a-w-0.1363328.0.v2 +3 -0
  38. whisper-tiny-ft-dummy/events.out.tfevents.1677596396.t1v-n-d163ce9a-w-0.1367877.0.v2 +3 -0
  39. whisper-tiny-ft-dummy/events.out.tfevents.1677596532.t1v-n-d163ce9a-w-0.1372356.0.v2 +3 -0
backup_run_flax_speech_recognition_seq2seq.py DELETED
@@ -1,880 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
- """
17
- Fine-tuning the Flax library models for sequence to sequence speech recognition.
18
- """
19
- # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
20
-
21
- import logging
22
- import math
23
- import os
24
- import sys
25
- import time
26
- from dataclasses import field
27
- from functools import partial
28
- from pathlib import Path
29
- from typing import Any, Callable, Dict, List, Optional, Union
30
-
31
- import datasets
32
- import flax
33
- import jax
34
- import jax.numpy as jnp
35
- import numpy as np
36
- import optax
37
- from datasets import Dataset, DatasetDict, load_dataset, load_metric
38
- from flax import jax_utils, traverse_util
39
- from flax.jax_utils import pad_shard_unpad, unreplicate
40
- from flax.training import train_state
41
- from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
42
- from huggingface_hub import Repository, create_repo
43
- from tqdm import tqdm
44
-
45
- import transformers
46
- from transformers import (
47
- AutoConfig,
48
- AutoFeatureExtractor,
49
- AutoProcessor,
50
- AutoTokenizer,
51
- FlaxAutoModelForSpeechSeq2Seq,
52
- HfArgumentParser,
53
- Seq2SeqTrainingArguments,
54
- is_tensorboard_available,
55
- )
56
- from transformers.file_utils import get_full_repo_name
57
- from transformers.utils import check_min_version, send_example_telemetry
58
- from transformers.utils.versions import require_version
59
-
60
-
61
- # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
62
- check_min_version("4.27.0.dev0")
63
-
64
- require_version("datasets>=1.18.0", "To fix: pip install -r examples/flax/speech-recogintion/requirements.txt")
65
-
66
- logger = logging.getLogger(__name__)
67
-
68
-
69
@flax.struct.dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # Required: the checkpoint to fine-tune from (local path or Hub id).
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # The next three default to model_name_or_path when left unset (resolved in main()).
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "feature extractor name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    # Resolved via getattr(jnp, dtype) when the model is loaded; must name a jnp dtype.
    dtype: Optional[str] = field(
        default="float32",
        metadata={
            "help": (
                "Floating-point format in which the model weights should be initialized and trained. Choose one of"
                " `[float32, float16, bfloat16]`."
            )
        },
    )
    num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
                "which is used during evaluation."
            )
        },
    )
124
-
125
-
126
@flax.struct.dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    # NOTE(review): typed `str` but defaults to None — effectively required at runtime.
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    # NOTE(review): appears unused by this script (text_column_name below is the one read); confirm before removing.
    text_column: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
    )
    dataset_cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Path to cache directory for saving and loading datasets"}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    text_column_name: str = field(
        default="text",
        metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
    )
    # Duration bounds are converted to sample counts (seconds * sampling_rate) before filtering.
    max_duration_in_seconds: float = field(
        default=20.0,
        metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"},
    )
    min_duration_in_seconds: float = field(
        default=0.0,
        metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"},
    )
    # NOTE(review): typed `float` but used as a token count; an int would be more accurate.
    max_label_length: float = field(
        default=128,
        metadata={"help": "Truncate transcriptions that are longer `max_eval_length` tokens."},
    )
    pad_input_to_multiple_of: Optional[int] = field(
        default=None,
        metadata={
            "help": "If set will pad the input sequence to a multiple of the provided value. "
            "This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the inputs to max length."
        },
    )
    pad_target_to_multiple_of: Optional[int] = field(
        default=None,
        metadata={
            "help": "If set will pad the target sequence to a multiple of the provided value. "
            "This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the targets to max length."
        },
    )
    preprocessing_only: bool = field(
        default=False,
        metadata={
            "help": "Whether to only do data preprocessing and skip training. "
            "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
            "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
            "so that the cached datasets can consequently be loaded in distributed training"
        },
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"
        },
    )
    do_lower_case: bool = field(
        default=True,
        metadata={"help": "Whether the target text should be lower cased."},
    )
    language: str = field(
        default=None,
        metadata={
            "help": (
                "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
                "only. For English speech recognition, it should be set to `None`."
            )
        },
    )
    task: str = field(
        default="transcribe",
        metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
    )
238
-
239
-
240
def shift_tokens_right(label_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    """
    Shift label ids one token to the right for teacher forcing.

    Args:
        label_ids: 2-D array of token ids, shape (batch, seq_len).
        decoder_start_token_id: id written into position 0 of every row.

    Returns:
        New array of the same shape where column 0 is `decoder_start_token_id`
        and columns 1..n-1 are the original columns 0..n-2 (last label dropped).
    """
    # Fix: annotate with np.ndarray (the array type) instead of np.array (a factory function).
    shifted_label_ids = np.zeros_like(label_ids)
    shifted_label_ids[:, 1:] = label_ids[:, :-1]
    shifted_label_ids[:, 0] = decoder_start_token_id

    return shifted_label_ids
249
-
250
-
251
@flax.struct.dataclass
class FlaxDataCollatorSpeechSeq2SeqWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor ([`Wav2Vec2Processor`])
            The processor used for proccessing the data.
        decoder_start_token_id (:obj: `int`)
            The begin-of-sentence of the decoder.
        input_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned input sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
            sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
            maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
            different lengths).
        target_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned target sequences (according to the model's padding side and padding index).
            See above for details.
        max_input_length (:obj:`float`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_target_length (:obj:`int`, `optional`):
            Maximum length of the ``labels`` of the returned list and optionally padding length (see above).
        pad_input_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the input sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        pad_target_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the target sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    processor: Any
    decoder_start_token_id: int
    input_padding: Union[bool, str] = "longest"
    target_padding: Union[bool, str] = "max_length"
    max_input_length: Optional[float] = None
    max_target_length: Optional[int] = None
    pad_input_to_multiple_of: Optional[int] = None
    pad_target_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]:
        """Pad a batch of features and build `labels` / `decoder_input_ids` arrays for seq2seq training."""
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        model_input_name = self.processor.model_input_names[0]
        input_features = {model_input_name: features[model_input_name]}
        label_features = {"input_ids": features["labels"]}

        # reformat list to dict and set to pytorch format
        batch = self.processor.feature_extractor.pad(
            input_features,
            max_length=self.max_input_length,
            padding=self.input_padding,
            pad_to_multiple_of=self.pad_input_to_multiple_of,
            return_tensors="np",
        )

        labels_batch = self.processor.tokenizer.pad(
            label_features,
            max_length=self.max_target_length,
            padding=self.target_padding,
            pad_to_multiple_of=self.pad_target_to_multiple_of,
            return_tensors="np",
        )

        # if bos token is appended in previous tokenization step,
        # cut bos token here as it's append later anyways
        # NOTE: only strips when EVERY row in the batch starts with the start token;
        # the attention mask is sliced in lockstep so the -100 masking below stays aligned.
        labels = labels_batch["input_ids"]
        if (labels[:, 0] == self.decoder_start_token_id).all().item():
            labels = labels[:, 1:]
            labels_batch.attention_mask = labels_batch.attention_mask[:, 1:]

        # decoder inputs are the labels shifted right with the start token prepended (teacher forcing)
        decoder_input_ids = shift_tokens_right(labels, self.decoder_start_token_id)

        # replace padding with -100 to ignore correctly when computing the loss
        labels = np.ma.array(labels, mask=np.not_equal(labels_batch.attention_mask, 1))
        labels = labels.filled(fill_value=-100)

        batch["labels"] = labels
        batch["decoder_input_ids"] = decoder_input_ids

        return batch
336
-
337
-
338
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False, drop_last=True):
    """
    Yield batches of `batch_size` examples from `dataset`.

    When `shuffle` is True the example order is permuted with `rng`. When
    `drop_last` is True any trailing incomplete batch is discarded; otherwise
    the final batch may contain anywhere from 1 to `batch_size` examples.
    """
    num_samples = len(dataset)

    if shuffle:
        order = np.asarray(jax.random.permutation(rng, num_samples))
    else:
        order = np.arange(num_samples)

    if drop_last:
        # Keep only full batches and view the index array as (steps, batch_size).
        num_batches = num_samples // batch_size
        order = order[: num_batches * batch_size].reshape((num_batches, batch_size))
    else:
        # array_split tolerates a ragged final batch.
        num_batches = math.ceil(num_samples / batch_size)
        order = np.array_split(order, num_batches)

    for batch_indices in order:
        yield dataset[batch_indices]
360
-
361
-
362
class TrainState(train_state.TrainState):
    # PRNG key carried in the train state so each step can derive fresh dropout masks.
    dropout_rng: jnp.ndarray

    def replicate(self):
        """Replicate the state across local devices, sharding the dropout key so each device gets its own."""
        return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
367
-
368
-
369
def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step):
    """Write accumulated train metrics, eval metrics, and wall-clock time to TensorBoard at `step`."""
    summary_writer.scalar("train_time", train_time, step)

    stacked = get_metrics(train_metrics)
    for name, values in stacked.items():
        # Back-fill one scalar per recorded train step so the x-axis ends exactly at `step`.
        first_step = step - len(values) + 1
        for offset, value in enumerate(values):
            summary_writer.scalar(f"train_{name}", value, first_step + offset)

    for name, value in eval_metrics.items():
        summary_writer.scalar(f"eval_{name}", value, step)
379
-
380
-
381
def create_learning_rate_fn(
    num_train_steps: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """
    Build a step -> learning-rate schedule: linear warmup from 0 to
    `learning_rate` over `num_warmup_steps`, then linear decay to 0 over the
    remaining `num_train_steps - num_warmup_steps` steps.
    """
    warmup = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
    decay = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
    )
    # Switch from warmup to decay exactly at the warmup boundary.
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
391
-
392
-
393
- def main():
394
- # 1. Parse input arguments
395
- # See all possible arguments in src/transformers/training_args.py
396
- # or by passing the --help flag to this script.
397
- # We now keep distinct sets of args, for a cleaner separation of concerns.
398
- parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
399
-
400
- if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
401
- # If we pass only one argument to the script and it's the path to a json file,
402
- # let's parse it to get our arguments.
403
- model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
404
- else:
405
- model_args, data_args, training_args = parser.parse_args_into_dataclasses()
406
-
407
- # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
408
- # information sent is the one passed as arguments along with your JAX/Flax versions.
409
- send_example_telemetry("run_speech_recognition_seq2seq", model_args, data_args, framework="flax")
410
-
411
- # 2. Setup logging
412
- # Make one log on every process with the configuration for debugging.
413
- logging.basicConfig(
414
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
415
- datefmt="%m/%d/%Y %H:%M:%S",
416
- handlers=[logging.StreamHandler(sys.stdout)],
417
- )
418
- # Set the verbosity to info of the Transformers logger.
419
- # We only want one process per machine to log things on the screen.
420
- logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
421
- if jax.process_index() == 0:
422
- datasets.utils.logging.set_verbosity_warning()
423
- transformers.utils.logging.set_verbosity_info()
424
- else:
425
- datasets.utils.logging.set_verbosity_error()
426
- transformers.utils.logging.set_verbosity_error()
427
-
428
- logger.info("Training/evaluation parameters %s", training_args)
429
-
430
- # Check the output dir is valid
431
- if (
432
- os.path.exists(training_args.output_dir)
433
- and os.listdir(training_args.output_dir)
434
- and training_args.do_train
435
- and not training_args.overwrite_output_dir
436
- ):
437
- raise ValueError(
438
- f"Output directory ({training_args.output_dir}) already exists and is not empty."
439
- "Use `--overwrite_output_dir` to overcome."
440
- )
441
-
442
- # Handle the repository creation
443
- if training_args.push_to_hub:
444
- if training_args.hub_model_id is None:
445
- repo_name = get_full_repo_name(
446
- Path(training_args.output_dir).absolute().name, token=training_args.hub_token
447
- )
448
- else:
449
- repo_name = training_args.hub_model_id
450
- create_repo(repo_name, exist_ok=True, token=training_args.hub_token)
451
- repo = Repository(training_args.output_dir, clone_from=repo_name, token=training_args.hub_token)
452
-
453
- # 3. Load dataset
454
- raw_datasets = DatasetDict()
455
-
456
- if training_args.do_train:
457
- raw_datasets["train"] = load_dataset(
458
- data_args.dataset_name,
459
- data_args.dataset_config_name,
460
- split=data_args.train_split_name,
461
- cache_dir=data_args.dataset_cache_dir,
462
- use_auth_token=True if model_args.use_auth_token else None,
463
- )
464
-
465
- if training_args.do_eval:
466
- raw_datasets["eval"] = load_dataset(
467
- data_args.dataset_name,
468
- data_args.dataset_config_name,
469
- split=data_args.eval_split_name,
470
- cache_dir=data_args.dataset_cache_dir,
471
- use_auth_token=True if model_args.use_auth_token else None,
472
- )
473
-
474
- if not training_args.do_train and not training_args.do_eval:
475
- raise ValueError(
476
- "Cannot not train and not do evaluation. At least one of training or evaluation has to be performed."
477
- )
478
-
479
- if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names:
480
- raise ValueError(
481
- f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
482
- "Make sure to set `--audio_column_name` to the correct audio column - one of "
483
- f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
484
- )
485
-
486
- if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names:
487
- raise ValueError(
488
- f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
489
- "Make sure to set `--text_column_name` to the correct text column - one of "
490
- f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
491
- )
492
-
493
- # 5. Load pretrained model, tokenizer, and feature extractor
494
- config = AutoConfig.from_pretrained(
495
- model_args.config_name if model_args.config_name else model_args.model_name_or_path,
496
- cache_dir=model_args.cache_dir,
497
- revision=model_args.model_revision,
498
- use_auth_token=True if model_args.use_auth_token else None,
499
- )
500
- feature_extractor = AutoFeatureExtractor.from_pretrained(
501
- model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
502
- cache_dir=model_args.cache_dir,
503
- revision=model_args.model_revision,
504
- use_auth_token=True if model_args.use_auth_token else None,
505
- )
506
- tokenizer = AutoTokenizer.from_pretrained(
507
- model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
508
- cache_dir=model_args.cache_dir,
509
- use_fast=model_args.use_fast_tokenizer,
510
- revision=model_args.model_revision,
511
- use_auth_token=True if model_args.use_auth_token else None,
512
- )
513
-
514
- model = FlaxAutoModelForSpeechSeq2Seq.from_pretrained(
515
- model_args.model_name_or_path,
516
- config=config,
517
- dtype=getattr(jnp, model_args.dtype),
518
- cache_dir=model_args.cache_dir,
519
- revision=model_args.model_revision,
520
- use_auth_token=True if model_args.use_auth_token else None,
521
- )
522
-
523
- if model.config.decoder_start_token_id is None:
524
- raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
525
-
526
- # 6. Resample speech dataset: `datasets` takes care of automatically loading and resampling the audio,
527
- # so we just need to set the correct target sampling rate.
528
- raw_datasets = raw_datasets.cast_column(
529
- data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
530
- )
531
-
532
- # 7. Preprocessing the datasets.
533
- # We need to read the audio files as arrays and tokenize the targets.
534
- max_input_length = int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
535
- min_input_length = int(data_args.min_duration_in_seconds * feature_extractor.sampling_rate)
536
- max_label_length = (
537
- data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length
538
- )
539
- pad_input_to_multiple_of = data_args.pad_input_to_multiple_of
540
- pad_target_to_multiple_of = data_args.pad_target_to_multiple_of
541
- audio_column_name = data_args.audio_column_name
542
- num_workers = data_args.preprocessing_num_workers
543
- text_column_name = data_args.text_column_name
544
- model_input_name = feature_extractor.model_input_names[0]
545
- do_lower_case = data_args.do_lower_case
546
-
547
- if training_args.do_train and data_args.max_train_samples is not None:
548
- raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
549
-
550
- if training_args.do_eval and data_args.max_eval_samples is not None:
551
- raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
552
-
553
- if data_args.language is not None:
554
- # We only need to set the task id when the language is specified (i.e. in a multilingual setting)
555
- tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
556
-
557
- def prepare_dataset(batch):
558
- # process audio
559
- sample = batch[audio_column_name]
560
- inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
561
- # process audio length
562
- batch[model_input_name] = inputs.get(model_input_name)[0]
563
- batch["input_length"] = len(sample["array"])
564
-
565
- # process targets
566
- input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
567
- batch["labels"] = tokenizer(input_str).input_ids
568
- return batch
569
-
570
- vectorized_datasets = raw_datasets.map(
571
- prepare_dataset,
572
- remove_columns=next(iter(raw_datasets.values())).column_names,
573
- num_proc=num_workers,
574
- desc="preprocess train dataset",
575
- )
576
-
577
- # filter training data with inputs longer than max_input_length
578
- def is_audio_in_length_range(length):
579
- return min_input_length < length < max_input_length
580
-
581
- vectorized_datasets = vectorized_datasets.filter(
582
- is_audio_in_length_range,
583
- num_proc=num_workers,
584
- input_columns=["input_length"],
585
- )
586
-
587
- # for large datasets it is advised to run the preprocessing on a
588
- # single machine first with `args.preprocessing_only` since there will mostly likely
589
- # be a timeout when running the script in distributed mode.
590
- # In a second step `args.preprocessing_only` can then be set to `False` to load the
591
- # cached dataset
592
- if data_args.preprocessing_only:
593
- cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
594
- logger.info(f"Data preprocessing finished. Files cached at {cache}.")
595
- return
596
-
597
- # 8. Load Metric
598
- metric = load_metric("wer")
599
-
600
- def compute_metrics(preds, labels):
601
- for ind in range(len(labels)):
602
- labels[ind][labels[ind] == -100] = tokenizer.pad_token_id
603
-
604
- pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True)
605
- # we do not want to group tokens when computing the metrics
606
-
607
- try:
608
- label_str = tokenizer.batch_decode(labels, skip_special_tokens=True)
609
- except:
610
- breakpoint()
611
-
612
- wer = metric.compute(predictions=pred_str, references=label_str)
613
- return {"wer": wer}
614
-
615
- # 9. Save feature extractor, tokenizer and config
616
- feature_extractor.save_pretrained(training_args.output_dir)
617
- tokenizer.save_pretrained(training_args.output_dir)
618
- config.save_pretrained(training_args.output_dir)
619
-
620
- processor = AutoProcessor.from_pretrained(training_args.output_dir)
621
-
622
- data_collator = FlaxDataCollatorSpeechSeq2SeqWithPadding(
623
- processor=processor,
624
- decoder_start_token_id=model.config.decoder_start_token_id,
625
- input_padding="longest",
626
- target_padding="longest",
627
- max_target_length=max_label_length,
628
- pad_input_to_multiple_of=pad_input_to_multiple_of,
629
- pad_target_to_multiple_of=pad_target_to_multiple_of if pad_target_to_multiple_of else max_label_length,
630
- )
631
-
632
- # Enable tensorboard only on the master node
633
- has_tensorboard = is_tensorboard_available()
634
- if has_tensorboard and jax.process_index() == 0:
635
- try:
636
- from flax.metrics.tensorboard import SummaryWriter
637
-
638
- summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
639
- except ImportError as ie:
640
- has_tensorboard = False
641
- logger.warning(
642
- f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
643
- )
644
- else:
645
- logger.warning(
646
- "Unable to display metrics through TensorBoard because the package is not installed: "
647
- "Please run pip install tensorboard to enable."
648
- )
649
-
650
- # Initialize our training
651
- rng = jax.random.PRNGKey(training_args.seed)
652
- rng, dropout_rng = jax.random.split(rng)
653
-
654
- # Store some constant
655
- num_epochs = int(training_args.num_train_epochs)
656
- train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
657
- per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
658
- eval_batch_size = per_device_eval_batch_size * jax.device_count()
659
- steps_per_epoch = len(vectorized_datasets["train"]) // train_batch_size
660
- total_train_steps = steps_per_epoch * num_epochs
661
-
662
- # Create learning rate schedule
663
- linear_decay_lr_schedule_fn = create_learning_rate_fn(
664
- len(vectorized_datasets["train"]),
665
- training_args.warmup_steps,
666
- training_args.learning_rate,
667
- )
668
-
669
- # We use Optax's "masking" functionality to not apply weight decay
670
- # to bias and LayerNorm scale parameters. decay_mask_fn returns a
671
- # mask boolean with the same structure as the parameters.
672
- # The mask is True for parameters that should be decayed.
673
- def decay_mask_fn(params):
674
- flat_params = traverse_util.flatten_dict(params)
675
- # find out all LayerNorm parameters
676
- layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
677
- layer_norm_named_params = set(
678
- [
679
- layer[-2:]
680
- for layer_norm_name in layer_norm_candidates
681
- for layer in flat_params.keys()
682
- if layer_norm_name in "".join(layer).lower()
683
- ]
684
- )
685
- flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
686
- return traverse_util.unflatten_dict(flat_mask)
687
-
688
- # create adam optimizer
689
- adamw = optax.adamw(
690
- learning_rate=linear_decay_lr_schedule_fn,
691
- b1=training_args.adam_beta1,
692
- b2=training_args.adam_beta2,
693
- eps=training_args.adam_epsilon,
694
- weight_decay=training_args.weight_decay,
695
- mask=decay_mask_fn,
696
- )
697
-
698
- # Setup train state
699
- state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng)
700
-
701
- # label smoothed cross entropy
702
- def loss_fn(logits, labels, label_smoothing_factor=0.0):
703
- """
704
- The label smoothing implementation is adapted from Flax's official example:
705
- https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104
706
- """
707
- vocab_size = logits.shape[-1]
708
- confidence = 1.0 - label_smoothing_factor
709
- low_confidence = (1.0 - confidence) / (vocab_size - 1)
710
- normalizing_constant = -(
711
- confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
712
- )
713
- soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence)
714
-
715
- loss = optax.softmax_cross_entropy(logits, soft_labels)
716
- loss = loss - normalizing_constant
717
-
718
- # ignore padded tokens from loss, i.e. where labels are not set to -100
719
- padding_mask = labels >= 0
720
- loss = loss * padding_mask
721
- loss = loss.sum()
722
- num_labels = padding_mask.sum()
723
- return loss, num_labels
724
-
725
- # Define gradient update step fn
726
- def train_step(state, batch, label_smoothing_factor=0.0):
727
- dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)
728
-
729
- def compute_loss(params):
730
- labels = batch.pop("labels")
731
- logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
732
- loss, num_labels = loss_fn(logits, labels, label_smoothing_factor)
733
- return loss, num_labels
734
-
735
- grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
736
- (loss, num_labels), grad = grad_fn(state.params)
737
- num_labels = jax.lax.psum(num_labels, "batch")
738
-
739
- # true loss = total loss / total samples
740
- loss = jax.lax.psum(loss, "batch")
741
- loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
742
-
743
- # true grad = total grad / total samples
744
- grad = jax.lax.psum(grad, "batch")
745
- grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad)
746
- new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
747
-
748
- metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
749
- return new_state, metrics
750
-
751
- # Define eval fn
752
- def eval_step(params, batch, label_smoothing_factor=0.0):
753
- labels = batch.pop("labels")
754
- logits = model(**batch, params=params, train=False)[0]
755
-
756
- loss, num_labels = loss_fn(logits, labels, label_smoothing_factor)
757
- num_labels = jax.lax.psum(num_labels, "batch")
758
-
759
- # true loss = total loss / total samples
760
- loss = jax.lax.psum(loss, "batch")
761
- loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
762
-
763
- metrics = {"loss": loss}
764
- return metrics
765
-
766
- # Define generation function
767
- num_beams = model_args.num_beams if model_args.num_beams is not None else model.config.num_beams
768
- gen_kwargs = {"max_length": max_label_length, "num_beams": num_beams}
769
-
770
- def generate_step(params, batch):
771
- model.params = params
772
- output_ids = model.generate(batch[model_input_name], attention_mask=batch.get("attention_mask"), **gen_kwargs)
773
- return output_ids.sequences
774
-
775
- # Create parallel version of the train and eval step
776
- p_train_step = jax.pmap(
777
- partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch", donate_argnums=(0,)
778
- )
779
- p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch")
780
- p_generate_step = jax.pmap(generate_step, "batch")
781
-
782
- # Replicate the train state on each device
783
- state = state.replicate()
784
-
785
- logger.info("***** Running training *****")
786
- logger.info(f" Num examples = {len(vectorized_datasets['train'])}")
787
- logger.info(f" Num Epochs = {num_epochs}")
788
- logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
789
- logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}")
790
- logger.info(f" Total optimization steps = {total_train_steps}")
791
-
792
- train_time = 0
793
- epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
794
- for epoch in epochs:
795
- # ======================== Training ================================
796
- train_start = time.time()
797
-
798
- # Create sampling rng
799
- rng, input_rng = jax.random.split(rng)
800
- train_metrics = []
801
-
802
- # Generate an epoch by shuffling sampling indices from the train dataset
803
- train_loader = data_loader(input_rng, vectorized_datasets["train"], train_batch_size, shuffle=True)
804
- # train
805
- for _ in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False):
806
- samples = next(train_loader)
807
- batch = data_collator(samples)
808
- batch = shard(batch.data)
809
- state, train_metric = p_train_step(state, batch)
810
- train_metrics.append(train_metric)
811
-
812
- train_time += time.time() - train_start
813
-
814
- train_metrics = unreplicate(train_metrics)
815
-
816
- if train_metrics:
817
- epochs.write(
818
- f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metrics['loss']}, Learning Rate:"
819
- f" {train_metrics['learning_rate']})"
820
- )
821
-
822
- # ======================== Evaluating ==============================
823
- eval_metrics = []
824
- eval_preds = []
825
- eval_labels = []
826
-
827
- eval_loader = data_loader(input_rng, vectorized_datasets["eval"], eval_batch_size, drop_last=False)
828
- eval_steps = math.ceil(len(vectorized_datasets["eval"]) / eval_batch_size)
829
- for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
830
- # Model forward
831
- samples = next(eval_loader)
832
- batch = data_collator(samples)
833
- labels = batch["labels"]
834
-
835
- metrics = pad_shard_unpad(p_eval_step, static_return=True)(
836
- state.params, batch.data, min_device_batch=per_device_eval_batch_size
837
- )
838
- eval_metrics.append(metrics)
839
-
840
- # generation
841
- if training_args.predict_with_generate:
842
- generated_ids = pad_shard_unpad(p_generate_step)(state.params, batch.data)
843
- eval_preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"])))
844
- eval_labels.extend(labels)
845
-
846
- # normalize eval metrics
847
- eval_metrics = get_metrics(eval_metrics)
848
- eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics)
849
-
850
- # compute WER metric
851
- wer_desc = ""
852
- if training_args.predict_with_generate:
853
- wer_metric = compute_metrics(eval_preds, eval_labels)
854
- eval_metrics.update(wer_metric)
855
- wer_desc = " ".join([f"Eval {key}: {value} |" for key, value in wer_metric.items()])
856
-
857
- # Print metrics and update progress bar
858
- desc = f"Epoch... ({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']} | {wer_desc})"
859
- epochs.write(desc)
860
- epochs.desc = desc
861
-
862
- # Save metrics
863
- if has_tensorboard and jax.process_index() == 0:
864
- cur_step = epoch * (len(vectorized_datasets["train"]) // train_batch_size)
865
- if train_metrics:
866
- write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step)
867
- else:
868
- print(f"Train metrics not written because currently it is empty.")
869
-
870
- # save checkpoint after each epoch and push checkpoint to the hub
871
- if jax.process_index() == 0:
872
- params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params))
873
- model.save_pretrained(training_args.output_dir, params=params)
874
- tokenizer.save_pretrained(training_args.output_dir)
875
- if training_args.push_to_hub:
876
- repo.push_to_hub(commit_message=f"Saving weights and logs of epoch {epoch}", blocking=False)
877
-
878
-
879
- if __name__ == "__main__":
880
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_run_flax_speech_recognition_seq2seq_streaming.py DELETED
@@ -1,874 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
- """
17
- Fine-tuning the Flax library models for sequence to sequence speech recognition.
18
- """
19
- # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
20
-
21
- import logging
22
- import math
23
- import os
24
- import sys
25
- import time
26
- from dataclasses import field
27
- from functools import partial
28
- from pathlib import Path
29
- from typing import Any, Callable, Dict, List, Optional, Union
30
-
31
- import datasets
32
- import flax
33
- import jax
34
- import jax.numpy as jnp
35
- import numpy as np
36
- import optax
37
- from datasets import Dataset, DatasetDict, load_dataset, load_metric
38
- from flax import jax_utils, traverse_util
39
- from flax.jax_utils import pad_shard_unpad, unreplicate
40
- from flax.training import train_state
41
- from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
42
- from huggingface_hub import Repository, create_repo
43
- from tqdm import tqdm
44
-
45
- import transformers
46
- from transformers import (
47
- AutoConfig,
48
- AutoFeatureExtractor,
49
- AutoProcessor,
50
- AutoTokenizer,
51
- FlaxAutoModelForSpeechSeq2Seq,
52
- HfArgumentParser,
53
- Seq2SeqTrainingArguments,
54
- is_tensorboard_available,
55
- )
56
- from transformers.file_utils import get_full_repo_name
57
- from transformers.utils import check_min_version, send_example_telemetry
58
- from transformers.utils.versions import require_version
59
-
60
-
61
- # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
62
- check_min_version("4.27.0.dev0")
63
-
64
- require_version("datasets>=1.18.0", "To fix: pip install -r examples/flax/speech-recogintion/requirements.txt")
65
-
66
- logger = logging.getLogger(__name__)
67
-
68
-
69
- @flax.struct.dataclass
70
- class ModelArguments:
71
- """
72
- Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
73
- """
74
-
75
- model_name_or_path: str = field(
76
- metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
77
- )
78
- config_name: Optional[str] = field(
79
- default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
80
- )
81
- tokenizer_name: Optional[str] = field(
82
- default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
83
- )
84
- feature_extractor_name: Optional[str] = field(
85
- default=None, metadata={"help": "feature extractor name or path if not the same as model_name"}
86
- )
87
- cache_dir: Optional[str] = field(
88
- default=None,
89
- metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
90
- )
91
- use_fast_tokenizer: bool = field(
92
- default=True,
93
- metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
94
- )
95
- model_revision: str = field(
96
- default="main",
97
- metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
98
- )
99
- use_auth_token: bool = field(
100
- default=False,
101
- metadata={
102
- "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
103
- "with private models)."
104
- },
105
- )
106
- dtype: Optional[str] = field(
107
- default="float32",
108
- metadata={
109
- "help": (
110
- "Floating-point format in which the model weights should be initialized and trained. Choose one of"
111
- " `[float32, float16, bfloat16]`."
112
- )
113
- },
114
- )
115
- num_beams: Optional[int] = field(
116
- default=None,
117
- metadata={
118
- "help": (
119
- "Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
120
- "which is used during evaluation."
121
- )
122
- },
123
- )
124
-
125
-
126
- @flax.struct.dataclass
127
- class DataTrainingArguments:
128
- """
129
- Arguments pertaining to what data we are going to input our model for training and eval.
130
- """
131
-
132
- dataset_name: str = field(
133
- default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
134
- )
135
- dataset_config_name: Optional[str] = field(
136
- default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
137
- )
138
- text_column: Optional[str] = field(
139
- default=None,
140
- metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
141
- )
142
- dataset_cache_dir: Optional[str] = field(
143
- default=None, metadata={"help": "Path to cache directory for saving and loading datasets"}
144
- )
145
- overwrite_cache: bool = field(
146
- default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
147
- )
148
- preprocessing_num_workers: Optional[int] = field(
149
- default=None,
150
- metadata={"help": "The number of processes to use for the preprocessing."},
151
- )
152
- max_train_samples: Optional[int] = field(
153
- default=None,
154
- metadata={
155
- "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
156
- "value if set."
157
- },
158
- )
159
- max_eval_samples: Optional[int] = field(
160
- default=None,
161
- metadata={
162
- "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
163
- "value if set."
164
- },
165
- )
166
- audio_column_name: str = field(
167
- default="audio",
168
- metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
169
- )
170
- text_column_name: str = field(
171
- default="text",
172
- metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
173
- )
174
- max_duration_in_seconds: float = field(
175
- default=20.0,
176
- metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"},
177
- )
178
- min_duration_in_seconds: float = field(
179
- default=0.0,
180
- metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"},
181
- )
182
- max_label_length: float = field(
183
- default=128,
184
- metadata={"help": "Truncate transcriptions that are longer `max_eval_length` tokens."},
185
- )
186
- pad_input_to_multiple_of: Optional[int] = field(
187
- default=None,
188
- metadata={
189
- "help": "If set will pad the input sequence to a multiple of the provided value. "
190
- "This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the inputs to max length."
191
- },
192
- )
193
- pad_target_to_multiple_of: Optional[int] = field(
194
- default=None,
195
- metadata={
196
- "help": "If set will pad the target sequence to a multiple of the provided value. "
197
- "This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the targets to max length."
198
- },
199
- )
200
- preprocessing_only: bool = field(
201
- default=False,
202
- metadata={
203
- "help": "Whether to only do data preprocessing and skip training. "
204
- "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
205
- "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
206
- "so that the cached datasets can consequently be loaded in distributed training"
207
- },
208
- )
209
- train_split_name: str = field(
210
- default="train",
211
- metadata={
212
- "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
213
- },
214
- )
215
- eval_split_name: str = field(
216
- default="validation",
217
- metadata={
218
- "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"
219
- },
220
- )
221
- do_lower_case: bool = field(
222
- default=True,
223
- metadata={"help": "Whether the target text should be lower cased."},
224
- )
225
- language: str = field(
226
- default=None,
227
- metadata={
228
- "help": (
229
- "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
230
- "only. For English speech recognition, it should be set to `None`."
231
- )
232
- },
233
- )
234
- task: str = field(
235
- default="transcribe",
236
- metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
237
- )
238
-
239
-
240
- def shift_tokens_right(label_ids: np.array, decoder_start_token_id: int) -> np.ndarray:
241
- """
242
- Shift label ids one token to the right.
243
- """
244
- shifted_label_ids = np.zeros_like(label_ids)
245
- shifted_label_ids[:, 1:] = label_ids[:, :-1]
246
- shifted_label_ids[:, 0] = decoder_start_token_id
247
-
248
- return shifted_label_ids
249
-
250
-
251
- @flax.struct.dataclass
252
- class FlaxDataCollatorSpeechSeq2SeqWithPadding:
253
- """
254
- Data collator that will dynamically pad the inputs received.
255
- Args:
256
- processor ([`Wav2Vec2Processor`])
257
- The processor used for proccessing the data.
258
- decoder_start_token_id (:obj: `int`)
259
- The begin-of-sentence of the decoder.
260
- input_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
261
- Select a strategy to pad the returned input sequences (according to the model's padding side and padding index)
262
- among:
263
- * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
264
- sequence if provided).
265
- * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
266
- maximum acceptable input length for the model if that argument is not provided.
267
- * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
268
- different lengths).
269
- target_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
270
- Select a strategy to pad the returned target sequences (according to the model's padding side and padding index).
271
- See above for details.
272
- max_input_length (:obj:`float`, `optional`):
273
- Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
274
- max_target_length (:obj:`int`, `optional`):
275
- Maximum length of the ``labels`` of the returned list and optionally padding length (see above).
276
- pad_input_to_multiple_of (:obj:`int`, `optional`):
277
- If set will pad the input sequence to a multiple of the provided value.
278
- This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
279
- 7.5 (Volta).
280
- pad_target_to_multiple_of (:obj:`int`, `optional`):
281
- If set will pad the target sequence to a multiple of the provided value.
282
- This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
283
- 7.5 (Volta).
284
- """
285
-
286
- processor: Any
287
- decoder_start_token_id: int
288
- input_padding: Union[bool, str] = "longest"
289
- target_padding: Union[bool, str] = "max_length"
290
- max_input_length: Optional[float] = None
291
- max_target_length: Optional[int] = None
292
- pad_input_to_multiple_of: Optional[int] = None
293
- pad_target_to_multiple_of: Optional[int] = None
294
-
295
- def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]:
296
- # split inputs and labels since they have to be of different lengths and need
297
- # different padding methods
298
- model_input_name = self.processor.model_input_names[0]
299
- input_features = {model_input_name: features[model_input_name]}
300
- label_features = {"input_ids": features["labels"]}
301
-
302
- # reformat list to dict and set to pytorch format
303
- batch = self.processor.feature_extractor.pad(
304
- input_features,
305
- max_length=self.max_input_length,
306
- padding=self.input_padding,
307
- pad_to_multiple_of=self.pad_input_to_multiple_of,
308
- return_tensors="np",
309
- )
310
-
311
- labels_batch = self.processor.tokenizer.pad(
312
- label_features,
313
- max_length=self.max_target_length,
314
- padding=self.target_padding,
315
- pad_to_multiple_of=self.pad_target_to_multiple_of,
316
- return_tensors="np",
317
- )
318
-
319
- # if bos token is appended in previous tokenization step,
320
- # cut bos token here as it's append later anyways
321
- labels = labels_batch["input_ids"]
322
- if (labels[:, 0] == self.decoder_start_token_id).all().item():
323
- labels = labels[:, 1:]
324
- labels_batch.attention_mask = labels_batch.attention_mask[:, 1:]
325
-
326
- decoder_input_ids = shift_tokens_right(labels, self.decoder_start_token_id)
327
-
328
- # replace padding with -100 to ignore correctly when computing the loss
329
- labels = np.ma.array(labels, mask=np.not_equal(labels_batch.attention_mask, 1))
330
- labels = labels.filled(fill_value=-100)
331
-
332
- batch["labels"] = labels
333
- batch["decoder_input_ids"] = decoder_input_ids
334
-
335
- return batch
336
-
337
-
338
- def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False, drop_last=True):
339
- """
340
- Returns batches of size `batch_size` from `dataset`. If `drop_last` is set to `False`, the final batch may be incomplete,
341
- and range in size from 1 to `batch_size`. Shuffle batches if `shuffle` is `True`.
342
- """
343
- if shuffle:
344
- batch_idx = jax.random.permutation(rng, len(dataset))
345
- batch_idx = np.asarray(batch_idx)
346
- else:
347
- batch_idx = np.arange(len(dataset))
348
-
349
- if drop_last:
350
- steps_per_epoch = len(dataset) // batch_size
351
- batch_idx = batch_idx[: steps_per_epoch * batch_size] # Skip incomplete batch.
352
- batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
353
- else:
354
- steps_per_epoch = math.ceil(len(dataset) / batch_size)
355
- batch_idx = np.array_split(batch_idx, steps_per_epoch)
356
-
357
- for idx in batch_idx:
358
- batch = dataset[idx]
359
- yield batch
360
-
361
-
362
- class TrainState(train_state.TrainState):
363
- dropout_rng: jnp.ndarray
364
-
365
- def replicate(self):
366
- return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
367
-
368
-
369
- def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step):
370
- summary_writer.scalar("train_time", train_time, step)
371
-
372
- train_metrics = get_metrics(train_metrics)
373
- for key, vals in train_metrics.items():
374
- tag = f"train_{key}"
375
- for i, val in enumerate(vals):
376
- summary_writer.scalar(tag, val, step - len(vals) + i + 1)
377
-
378
- for metric_name, value in eval_metrics.items():
379
- summary_writer.scalar(f"eval_{metric_name}", value, step)
380
-
381
-
382
- def create_learning_rate_fn(
383
- num_train_steps: int, num_warmup_steps: int, learning_rate: float
384
- ) -> Callable[[int], jnp.array]:
385
- """Returns a linear warmup, linear_decay learning rate function."""
386
- warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
387
- decay_fn = optax.linear_schedule(
388
- init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
389
- )
390
- schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
391
- return schedule_fn
392
-
393
-
394
- def main():
395
- # 1. Parse input arguments
396
- # See all possible arguments in src/transformers/training_args.py
397
- # or by passing the --help flag to this script.
398
- # We now keep distinct sets of args, for a cleaner separation of concerns.
399
- parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
400
-
401
- if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
402
- # If we pass only one argument to the script and it's the path to a json file,
403
- # let's parse it to get our arguments.
404
- model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
405
- else:
406
- model_args, data_args, training_args = parser.parse_args_into_dataclasses()
407
-
408
- # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
409
- # information sent is the one passed as arguments along with your JAX/Flax versions.
410
- send_example_telemetry("run_speech_recognition_seq2seq", model_args, data_args, framework="flax")
411
-
412
- # 2. Setup logging
413
- # Make one log on every process with the configuration for debugging.
414
- logging.basicConfig(
415
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
416
- datefmt="%m/%d/%Y %H:%M:%S",
417
- handlers=[logging.StreamHandler(sys.stdout)],
418
- )
419
- # Set the verbosity to info of the Transformers logger.
420
- # We only want one process per machine to log things on the screen.
421
- logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
422
- if jax.process_index() == 0:
423
- datasets.utils.logging.set_verbosity_warning()
424
- transformers.utils.logging.set_verbosity_info()
425
- else:
426
- datasets.utils.logging.set_verbosity_error()
427
- transformers.utils.logging.set_verbosity_error()
428
-
429
- logger.info("Training/evaluation parameters %s", training_args)
430
-
431
- # Check the output dir is valid
432
- if (
433
- os.path.exists(training_args.output_dir)
434
- and os.listdir(training_args.output_dir)
435
- and training_args.do_train
436
- and not training_args.overwrite_output_dir
437
- ):
438
- raise ValueError(
439
- f"Output directory ({training_args.output_dir}) already exists and is not empty."
440
- "Use `--overwrite_output_dir` to overcome."
441
- )
442
-
443
- # Handle the repository creation
444
- if training_args.push_to_hub:
445
- if training_args.hub_model_id is None:
446
- repo_name = get_full_repo_name(
447
- Path(training_args.output_dir).absolute().name, token=training_args.hub_token
448
- )
449
- else:
450
- repo_name = training_args.hub_model_id
451
- create_repo(repo_name, exist_ok=True, token=training_args.hub_token)
452
- repo = Repository(training_args.output_dir, clone_from=repo_name, token=training_args.hub_token)
453
-
454
- # 3. Load dataset
455
- raw_datasets = DatasetDict()
456
-
457
- if training_args.do_train:
458
- raw_datasets["train"] = load_dataset(
459
- data_args.dataset_name,
460
- data_args.dataset_config_name,
461
- split=data_args.train_split_name,
462
- cache_dir=data_args.dataset_cache_dir,
463
- use_auth_token=True if model_args.use_auth_token else None,
464
- )
465
-
466
- if training_args.do_eval:
467
- raw_datasets["eval"] = load_dataset(
468
- data_args.dataset_name,
469
- data_args.dataset_config_name,
470
- split=data_args.eval_split_name,
471
- cache_dir=data_args.dataset_cache_dir,
472
- use_auth_token=True if model_args.use_auth_token else None,
473
- )
474
-
475
- if not training_args.do_train and not training_args.do_eval:
476
- raise ValueError(
477
- "Cannot not train and not do evaluation. At least one of training or evaluation has to be performed."
478
- )
479
-
480
- if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names:
481
- raise ValueError(
482
- f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
483
- "Make sure to set `--audio_column_name` to the correct audio column - one of "
484
- f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
485
- )
486
-
487
- if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names:
488
- raise ValueError(
489
- f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
490
- "Make sure to set `--text_column_name` to the correct text column - one of "
491
- f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
492
- )
493
-
494
- # 5. Load pretrained model, tokenizer, and feature extractor
495
- config = AutoConfig.from_pretrained(
496
- model_args.config_name if model_args.config_name else model_args.model_name_or_path,
497
- cache_dir=model_args.cache_dir,
498
- revision=model_args.model_revision,
499
- use_auth_token=True if model_args.use_auth_token else None,
500
- )
501
- feature_extractor = AutoFeatureExtractor.from_pretrained(
502
- model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
503
- cache_dir=model_args.cache_dir,
504
- revision=model_args.model_revision,
505
- use_auth_token=True if model_args.use_auth_token else None,
506
- )
507
- tokenizer = AutoTokenizer.from_pretrained(
508
- model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
509
- cache_dir=model_args.cache_dir,
510
- use_fast=model_args.use_fast_tokenizer,
511
- revision=model_args.model_revision,
512
- use_auth_token=True if model_args.use_auth_token else None,
513
- )
514
-
515
- model = FlaxAutoModelForSpeechSeq2Seq.from_pretrained(
516
- model_args.model_name_or_path,
517
- config=config,
518
- dtype=getattr(jnp, model_args.dtype),
519
- cache_dir=model_args.cache_dir,
520
- revision=model_args.model_revision,
521
- use_auth_token=True if model_args.use_auth_token else None,
522
- )
523
-
524
- if model.config.decoder_start_token_id is None:
525
- raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
526
-
527
- # 6. Resample speech dataset: `datasets` takes care of automatically loading and resampling the audio,
528
- # so we just need to set the correct target sampling rate.
529
- raw_datasets = raw_datasets.cast_column(
530
- data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
531
- )
532
-
533
- # 7. Preprocessing the datasets.
534
- # We need to read the audio files as arrays and tokenize the targets.
535
- max_input_length = int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
536
- min_input_length = int(data_args.min_duration_in_seconds * feature_extractor.sampling_rate)
537
- max_label_length = (
538
- data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length
539
- )
540
- pad_input_to_multiple_of = data_args.pad_input_to_multiple_of
541
- pad_target_to_multiple_of = data_args.pad_target_to_multiple_of
542
- audio_column_name = data_args.audio_column_name
543
- num_workers = data_args.preprocessing_num_workers
544
- text_column_name = data_args.text_column_name
545
- model_input_name = feature_extractor.model_input_names[0]
546
- do_lower_case = data_args.do_lower_case
547
-
548
- if training_args.do_train and data_args.max_train_samples is not None:
549
- raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
550
-
551
- if training_args.do_eval and data_args.max_eval_samples is not None:
552
- raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
553
-
554
- if data_args.language is not None:
555
- # We only need to set the task id when the language is specified (i.e. in a multilingual setting)
556
- tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
557
-
558
- def prepare_dataset(batch):
559
- # process audio
560
- sample = batch[audio_column_name]
561
- inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
562
- # process audio length
563
- batch[model_input_name] = inputs.get(model_input_name)[0]
564
- batch["input_length"] = len(sample["array"])
565
-
566
- # process targets
567
- input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
568
- batch["labels"] = tokenizer(input_str).input_ids
569
- return batch
570
-
571
- vectorized_datasets = raw_datasets.map(
572
- prepare_dataset,
573
- remove_columns=next(iter(raw_datasets.values())).column_names,
574
- num_proc=num_workers,
575
- desc="preprocess train dataset",
576
- )
577
-
578
- # filter training data with inputs longer than max_input_length
579
- def is_audio_in_length_range(length):
580
- return min_input_length < length < max_input_length
581
-
582
- vectorized_datasets = vectorized_datasets.filter(
583
- is_audio_in_length_range,
584
- num_proc=num_workers,
585
- input_columns=["input_length"],
586
- )
587
-
588
- # for large datasets it is advised to run the preprocessing on a
589
- # single machine first with `args.preprocessing_only` since there will mostly likely
590
- # be a timeout when running the script in distributed mode.
591
- # In a second step `args.preprocessing_only` can then be set to `False` to load the
592
- # cached dataset
593
- if data_args.preprocessing_only:
594
- cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
595
- logger.info(f"Data preprocessing finished. Files cached at {cache}.")
596
- return
597
-
598
- # 8. Load Metric
599
- metric = load_metric("wer")
600
-
601
- def compute_metrics(preds, labels):
602
- # replace padded labels by the padding token
603
- for idx in range(len(labels)):
604
- labels[idx][labels[idx] == -100] = tokenizer.pad_token_id
605
-
606
- pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True)
607
- # we do not want to group tokens when computing the metrics
608
- label_str = tokenizer.batch_decode(labels, skip_special_tokens=True)
609
-
610
- wer = metric.compute(predictions=pred_str, references=label_str)
611
- return {"wer": wer}
612
-
613
- # 9. Save feature extractor, tokenizer and config
614
- feature_extractor.save_pretrained(training_args.output_dir)
615
- tokenizer.save_pretrained(training_args.output_dir)
616
- config.save_pretrained(training_args.output_dir)
617
-
618
- processor = AutoProcessor.from_pretrained(training_args.output_dir)
619
-
620
- data_collator = FlaxDataCollatorSpeechSeq2SeqWithPadding(
621
- processor=processor,
622
- decoder_start_token_id=model.config.decoder_start_token_id,
623
- input_padding="longest",
624
- target_padding="longest",
625
- max_target_length=max_label_length,
626
- pad_input_to_multiple_of=pad_input_to_multiple_of,
627
- pad_target_to_multiple_of=pad_target_to_multiple_of if pad_target_to_multiple_of else max_label_length,
628
- )
629
-
630
- # Enable tensorboard only on the master node
631
- has_tensorboard = is_tensorboard_available()
632
- if has_tensorboard and jax.process_index() == 0:
633
- try:
634
- from flax.metrics.tensorboard import SummaryWriter
635
-
636
- summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
637
- except ImportError as ie:
638
- has_tensorboard = False
639
- logger.warning(
640
- f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
641
- )
642
- else:
643
- logger.warning(
644
- "Unable to display metrics through TensorBoard because the package is not installed: "
645
- "Please run pip install tensorboard to enable."
646
- )
647
-
648
- # Initialize our training
649
- rng = jax.random.PRNGKey(training_args.seed)
650
- rng, dropout_rng = jax.random.split(rng)
651
-
652
- # Store some constant
653
- num_epochs = int(training_args.num_train_epochs)
654
- train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
655
- per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
656
- eval_batch_size = per_device_eval_batch_size * jax.device_count()
657
- steps_per_epoch = len(vectorized_datasets["train"]) // train_batch_size
658
- total_train_steps = steps_per_epoch * num_epochs
659
-
660
- # Create learning rate schedule
661
- linear_decay_lr_schedule_fn = create_learning_rate_fn(
662
- len(vectorized_datasets["train"]),
663
- training_args.warmup_steps,
664
- training_args.learning_rate,
665
- )
666
-
667
- # We use Optax's "masking" functionality to not apply weight decay
668
- # to bias and LayerNorm scale parameters. decay_mask_fn returns a
669
- # mask boolean with the same structure as the parameters.
670
- # The mask is True for parameters that should be decayed.
671
- def decay_mask_fn(params):
672
- flat_params = traverse_util.flatten_dict(params)
673
- # find out all LayerNorm parameters
674
- layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
675
- layer_norm_named_params = set(
676
- [
677
- layer[-2:]
678
- for layer_norm_name in layer_norm_candidates
679
- for layer in flat_params.keys()
680
- if layer_norm_name in "".join(layer).lower()
681
- ]
682
- )
683
- flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
684
- return traverse_util.unflatten_dict(flat_mask)
685
-
686
- # create adam optimizer
687
- adamw = optax.adamw(
688
- learning_rate=linear_decay_lr_schedule_fn,
689
- b1=training_args.adam_beta1,
690
- b2=training_args.adam_beta2,
691
- eps=training_args.adam_epsilon,
692
- weight_decay=training_args.weight_decay,
693
- mask=decay_mask_fn,
694
- )
695
-
696
- # Setup train state
697
- state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng)
698
-
699
- # label smoothed cross entropy
700
- def loss_fn(logits, labels, label_smoothing_factor=0.0):
701
- """
702
- The label smoothing implementation is adapted from Flax's official example:
703
- https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104
704
- """
705
- vocab_size = logits.shape[-1]
706
- confidence = 1.0 - label_smoothing_factor
707
- low_confidence = (1.0 - confidence) / (vocab_size - 1)
708
- normalizing_constant = -(
709
- confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
710
- )
711
- soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence)
712
-
713
- loss = optax.softmax_cross_entropy(logits, soft_labels)
714
- loss = loss - normalizing_constant
715
-
716
- # ignore padded tokens from loss, i.e. where labels are not set to -100
717
- padding_mask = labels >= 0
718
- loss = loss * padding_mask
719
- loss = loss.sum()
720
- num_labels = padding_mask.sum()
721
- return loss, num_labels
722
-
723
- # Define gradient update step fn
724
- def train_step(state, batch, label_smoothing_factor=0.0):
725
- dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)
726
-
727
- def compute_loss(params):
728
- labels = batch.pop("labels")
729
- logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
730
- loss, num_labels = loss_fn(logits, labels, label_smoothing_factor)
731
- return loss, num_labels
732
-
733
- grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
734
- (loss, num_labels), grad = grad_fn(state.params)
735
- num_labels = jax.lax.psum(num_labels, "batch")
736
-
737
- # true loss = total loss / total samples
738
- loss = jax.lax.psum(loss, "batch")
739
- loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
740
-
741
- # true grad = total grad / total samples
742
- grad = jax.lax.psum(grad, "batch")
743
- grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad)
744
- new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
745
-
746
- metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
747
- return new_state, metrics
748
-
749
- # Define eval fn
750
- def eval_step(params, batch, label_smoothing_factor=0.0):
751
- labels = batch.pop("labels")
752
- logits = model(**batch, params=params, train=False)[0]
753
-
754
- loss, num_labels = loss_fn(logits, labels, label_smoothing_factor)
755
- num_labels = jax.lax.psum(num_labels, "batch")
756
-
757
- # true loss = total loss / total samples
758
- loss = jax.lax.psum(loss, "batch")
759
- loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
760
-
761
- metrics = {"loss": loss}
762
- return metrics
763
-
764
- # Define generation function
765
- num_beams = model_args.num_beams if model_args.num_beams is not None else model.config.num_beams
766
- gen_kwargs = {"max_length": max_label_length, "num_beams": num_beams}
767
-
768
- def generate_step(params, batch):
769
- model.params = params
770
- output_ids = model.generate(batch[model_input_name], attention_mask=batch.get("attention_mask"), **gen_kwargs)
771
- return output_ids.sequences
772
-
773
- # Create parallel version of the train and eval step
774
- p_train_step = jax.pmap(
775
- partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch", donate_argnums=(0,)
776
- )
777
- p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch")
778
- p_generate_step = jax.pmap(generate_step, "batch")
779
-
780
- # Replicate the train state on each device
781
- state = state.replicate()
782
-
783
- logger.info("***** Running training *****")
784
- logger.info(f" Num examples = {len(vectorized_datasets['train'])}")
785
- logger.info(f" Num Epochs = {num_epochs}")
786
- logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
787
- logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}")
788
- logger.info(f" Total optimization steps = {total_train_steps}")
789
-
790
- train_time = 0
791
- epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
792
- for epoch in epochs:
793
- # ======================== Training ================================
794
- train_start = time.time()
795
-
796
- # Create sampling rng
797
- rng, input_rng = jax.random.split(rng)
798
- train_metrics = []
799
-
800
- # Generate an epoch by shuffling sampling indices from the train dataset
801
- train_loader = data_loader(input_rng, vectorized_datasets["train"], train_batch_size, shuffle=True)
802
- # train
803
- for _ in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False):
804
- samples = next(train_loader)
805
- batch = data_collator(samples)
806
- batch = shard(batch.data)
807
- state, train_metric = p_train_step(state, batch)
808
- train_metrics.append(train_metric)
809
-
810
- train_time += time.time() - train_start
811
-
812
- train_metric = unreplicate(train_metric)
813
-
814
- epochs.write(
815
- f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate:"
816
- f" {train_metric['learning_rate']})"
817
- )
818
-
819
- # ======================== Evaluating ==============================
820
- eval_metrics = []
821
- eval_preds = []
822
- eval_labels = []
823
-
824
- eval_loader = data_loader(input_rng, vectorized_datasets["eval"], eval_batch_size, drop_last=False)
825
- eval_steps = math.ceil(len(vectorized_datasets["eval"]) / eval_batch_size)
826
- for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
827
- # Model forward
828
- samples = next(eval_loader)
829
- batch = data_collator(samples)
830
- labels = batch["labels"]
831
-
832
- metrics = pad_shard_unpad(p_eval_step, static_return=True)(
833
- state.params, batch.data, min_device_batch=per_device_eval_batch_size
834
- )
835
- eval_metrics.append(metrics)
836
-
837
- # generation
838
- if training_args.predict_with_generate:
839
- generated_ids = pad_shard_unpad(p_generate_step)(state.params, batch.data)
840
- eval_preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"])))
841
- eval_labels.extend(labels)
842
-
843
- # normalize eval metrics
844
- eval_metrics = get_metrics(eval_metrics)
845
- eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics)
846
-
847
- # compute WER metric
848
- wer_desc = ""
849
- if training_args.predict_with_generate:
850
- wer_metric = compute_metrics(eval_preds, eval_labels)
851
- eval_metrics.update(wer_metric)
852
- wer_desc = " ".join([f"Eval {key}: {value} |" for key, value in wer_metric.items()])
853
-
854
- # Print metrics and update progress bar
855
- desc = f"Epoch... ({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']} | {wer_desc})"
856
- epochs.write(desc)
857
- epochs.desc = desc
858
-
859
- # Save metrics
860
- if has_tensorboard and jax.process_index() == 0:
861
- cur_step = epoch * (len(vectorized_datasets["train"]) // train_batch_size)
862
- write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step)
863
-
864
- # save checkpoint after each epoch and push checkpoint to the hub
865
- if jax.process_index() == 0:
866
- params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params))
867
- model.save_pretrained(training_args.output_dir, params=params)
868
- tokenizer.save_pretrained(training_args.output_dir)
869
- if training_args.push_to_hub:
870
- repo.push_to_hub(commit_message=f"Saving weights and logs of epoch {epoch}", blocking=False)
871
-
872
-
873
- if __name__ == "__main__":
874
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
run.sh CHANGED
@@ -1,9 +1,10 @@
1
  python run_flax_speech_recognition_seq2seq.py \
2
  --model_name_or_path openai/whisper-small.en \
3
- --dataset_name hf-internal-testing/librispeech_asr_dummy \
4
- --dataset_config clean \
5
- --train_split_name validation \
6
- --eval_split_name validation \
 
7
  --output_dir whisper-small-flaxtest \
8
  --overwrite_output_dir \
9
  --num_train_epochs=2 \
 
1
  python run_flax_speech_recognition_seq2seq.py \
2
  --model_name_or_path openai/whisper-small.en \
3
+ --dataset_name mozilla-foundation/common_voice_11_0 \
4
+ --dataset_config es \
5
+ --text_column_name sentence \
6
+ --train_split_name test\
7
+ --eval_split_name test\
8
  --output_dir whisper-small-flaxtest \
9
  --overwrite_output_dir \
10
  --num_train_epochs=2 \
run_flax_speech_recognition_seq2seq.py CHANGED
@@ -1,5 +1,3 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
  # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
  #
5
  # Licensed under the Apache License, Version 2.0 (the "License");
 
 
 
1
  # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
2
  #
3
  # Licensed under the Apache License, Version 2.0 (the "License");
run_flax_speech_recognition_seq2seq_streaming.py CHANGED
@@ -177,7 +177,7 @@ class DataTrainingArguments:
177
  metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
178
  )
179
  max_duration_in_seconds: float = field(
180
- default=20.0,
181
  metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"},
182
  )
183
  min_duration_in_seconds: float = field(
@@ -501,7 +501,7 @@ def main():
501
  raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
502
 
503
  if training_args.do_train:
504
- raw_datasets["train"] = load_maybe_streaming_dataset(
505
  data_args.dataset_name,
506
  data_args.dataset_config_name,
507
  split=data_args.train_split_name,
@@ -511,7 +511,7 @@ def main():
511
  )
512
 
513
  if training_args.do_eval:
514
- raw_datasets["eval"] = load_maybe_streaming_dataset(
515
  data_args.dataset_name,
516
  data_args.dataset_config_name,
517
  split=data_args.eval_split_name,
@@ -625,69 +625,50 @@ def main():
625
  remove_columns=raw_datasets_features,
626
  ).with_format("torch")
627
 
628
- # Moving this to later. Better ways of doing this
629
- #if training_args.do_train and data_args.streaming:
630
- # manually shuffle if streaming (done by the trainer for non-streaming)
631
- #vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(
632
- # buffer_size=data_args.shuffle_buffer_size,
633
- # seed=training_args.seed,
634
- #)
635
-
636
-
637
  # filter training data with inputs longer than max_input_length
638
  def is_audio_in_length_range(length):
639
  return min_input_length < length < max_input_length
640
 
641
- # For debugging
642
- #def is_audio_in_length_range(length):
643
- # if min_input_length < length < max_input_length:
644
- # return True
645
- # else:
646
- # print(f"Warning: Input length {length} is not within the expected range [{min_input_length}, {max_input_length}].")
647
- # return False
648
-
649
-
650
  if training_args.do_train:
651
- vectorized_datasets["train"] = vectorized_datasets["train"].filter(
652
  is_audio_in_length_range,
653
  input_columns=["input_length"],
654
  )
655
-
656
- # for large datasets it is advised to run the preprocessing on a
657
- # single machine first with `args.preprocessing_only` since there will mostly likely
658
- # be a timeout when running the script in distributed mode.
659
- # In a second step `args.preprocessing_only` can then be set to `False` to load the
660
- # cached dataset
661
- # Not really needed for streaming
662
- # if data_args.preprocessing_only:
663
- # cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
664
- # logger.info(f"Data preprocessing finished. Files cached at {cache}.")
665
- # return
666
-
667
  # 8. Load Metric
668
  metric = evaluate.load("wer")
669
  do_normalize_eval = data_args.do_normalize_eval
670
 
671
- def compute_metrics(pred):
672
- pred_ids = pred.predictions
673
-
674
- pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
675
 
676
- pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
677
  # we do not want to group tokens when computing the metrics
678
- label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
679
-
680
- if do_normalize_eval:
681
- pred_str = [normalizer(pred) for pred in pred_str]
682
- label_str = [normalizer(label) for label in label_str]
683
- # filtering step to only evaluate the samples that correspond to non-zero references:
684
- pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0]
685
- label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0]
686
-
687
- wer = 100 * metric.compute(predictions=pred_str, references=label_str)
688
-
 
 
689
  return {"wer": wer}
690
-
 
691
  # 9. Save feature extractor, tokenizer and config
692
  feature_extractor.save_pretrained(training_args.output_dir)
693
  tokenizer.save_pretrained(training_args.output_dir)
@@ -730,8 +711,7 @@ def main():
730
  # Store some constant
731
  #num_epochs = int(training_args.num_train_epochs)
732
  train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
733
- per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
734
- eval_batch_size = per_device_eval_batch_size * jax.device_count()
735
 
736
 
737
  # Create learning rate schedule
@@ -875,16 +855,16 @@ def main():
875
  # Create sampling rng
876
  #rng, input_rng = jax.random.split(rng)
877
  train_metrics = []
878
- epoch = 0
879
 
 
 
 
 
880
 
881
  # Create a batched data iterator
882
  num_workers = 0
883
- # This is not working
884
- # vectorized_datasets["train"] = vectorized_datasets["train"].shuffle()
885
- batched_data_loader = torch.utils.data.DataLoader( batch_size=train_batch_size, dataset=vectorized_datasets["train"], num_workers=num_workers, collate_fn=lambda x: x )
886
- batched_data_iterator = torch.utils.data.dataloader._SingleProcessDataLoaderIter(batched_data_loader)
887
-
888
 
889
  # train
890
  for step in tqdm(range(data_args.num_train_steps), desc="Training...", position=1, leave=False):
@@ -894,8 +874,9 @@ def main():
894
 
895
  except StopIteration:
896
  epoch += 1
897
- batched_data_loader = torch.utils.data.DataLoader( batch_size=train_batch_size, dataset=vectorized_datasets["train"], num_workers=num_workers, collate_fn=lambda x: x )
898
- batched_data_iterator = torch.utils.data.dataloader._SingleProcessDataLoaderIter(batched_data_loader)
 
899
  samples = next(batched_data_iterator)
900
 
901
  logger.info(
@@ -908,18 +889,23 @@ def main():
908
  batch = shard(batch.data)
909
  state, train_metric = p_train_step(state, batch)
910
 
 
 
 
911
  # ======================== Evaluating ==============================
912
  if step % training_args.eval_steps == 0 and step > 0:
913
- eval_loader = data_loader(input_rng, vectorized_datasets["eval"], eval_batch_size, drop_last=False)
914
-
915
- for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
 
916
  # Model forward
917
- samples = next(eval_loader)
918
- batch = data_collator(samples)
 
919
  labels = batch["labels"]
920
 
921
  metrics = pad_shard_unpad(p_eval_step, static_return=True)(
922
- state.params, batch.data, min_device_batch=per_device_eval_batch_size
923
  )
924
  eval_metrics.append(metrics)
925
 
@@ -944,22 +930,22 @@ def main():
944
  desc = f"Epoch... ({epoch} | Eval Loss: {eval_metrics['loss']} | {wer_desc})"
945
  logger.info(desc)
946
 
947
- train_time += time.time() - train_start
948
- train_metric = unreplicate(train_metric)
949
-
950
-
951
- # Save metrics
952
- if has_tensorboard and jax.process_index() == 0:
953
- cur_step = epoch * (len(vectorized_datasets["train"]) // train_batch_size)
954
- write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step)
955
-
956
- # save checkpoint after each epoch and push checkpoint to the hub
957
- if jax.process_index() == 0:
958
- params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params))
959
- model.save_pretrained(training_args.output_dir, params=params)
960
- tokenizer.save_pretrained(training_args.output_dir)
961
- if training_args.push_to_hub:
962
- repo.push_to_hub(commit_message=f"Saving weights and logs of epoch {epoch}", blocking=False)
963
 
964
 
965
  if __name__ == "__main__":
 
177
  metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
178
  )
179
  max_duration_in_seconds: float = field(
180
+ default=30.0,
181
  metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"},
182
  )
183
  min_duration_in_seconds: float = field(
 
501
  raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
502
 
503
  if training_args.do_train:
504
+ raw_datasets[data_args.train_split_name] = load_maybe_streaming_dataset(
505
  data_args.dataset_name,
506
  data_args.dataset_config_name,
507
  split=data_args.train_split_name,
 
511
  )
512
 
513
  if training_args.do_eval:
514
+ raw_datasets[data_args.eval_split_name] = load_maybe_streaming_dataset(
515
  data_args.dataset_name,
516
  data_args.dataset_config_name,
517
  split=data_args.eval_split_name,
 
625
  remove_columns=raw_datasets_features,
626
  ).with_format("torch")
627
 
628
+
 
 
 
 
 
 
 
 
629
  # filter training data with inputs longer than max_input_length
630
  def is_audio_in_length_range(length):
631
  return min_input_length < length < max_input_length
632
 
633
+
 
 
 
 
 
 
 
 
634
  if training_args.do_train:
635
+ vectorized_datasets[data_args.train_split_name] = vectorized_datasets[data_args.train_split_name].filter(
636
  is_audio_in_length_range,
637
  input_columns=["input_length"],
638
  )
639
+ if training_args.do_eval:
640
+ vectorized_datasets[data_args.eval_split_name] = vectorized_datasets[data_args.eval_split_name].filter(
641
+ is_audio_in_length_range,
642
+ input_columns=["input_length"],
643
+ )
644
+
 
 
 
 
 
 
645
  # 8. Load Metric
646
  metric = evaluate.load("wer")
647
  do_normalize_eval = data_args.do_normalize_eval
648
 
649
+ def compute_metrics(preds, labels):
650
+ # replace padded labels by the padding token
651
+ for idx in range(len(labels)):
652
+ labels[idx][labels[idx] == -100] = tokenizer.pad_token_id
653
 
654
+ pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True)
655
  # we do not want to group tokens when computing the metrics
656
+ label_str = tokenizer.batch_decode(labels, skip_special_tokens=True)
657
+
658
+ # TODO
659
+ # We should implement and test this as well
660
+ #if do_normalize_eval:
661
+ # pred_str = [normalizer(pred) for pred in pred_str]
662
+ # label_str = [normalizer(label) for label in label_str]
663
+ # # filtering step to only evaluate the samples that correspond to non-zero references:
664
+ # pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0]
665
+ # label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0]
666
+
667
+
668
+ wer = metric.compute(predictions=pred_str, references=label_str)
669
  return {"wer": wer}
670
+
671
+
672
  # 9. Save feature extractor, tokenizer and config
673
  feature_extractor.save_pretrained(training_args.output_dir)
674
  tokenizer.save_pretrained(training_args.output_dir)
 
711
  # Store some constant
712
  #num_epochs = int(training_args.num_train_epochs)
713
  train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
714
+ eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
 
715
 
716
 
717
  # Create learning rate schedule
 
855
  # Create sampling rng
856
  #rng, input_rng = jax.random.split(rng)
857
  train_metrics = []
 
858
 
859
+ # TODO
860
+ # Do the reset epoch stuff to shuffle
861
+
862
+ epoch = 0
863
 
864
  # Create a batched data iterator
865
  num_workers = 0
866
+ batched_data_loader = torch.utils.data.DataLoader( batch_size=train_batch_size, dataset=vectorized_datasets[data_args.train_split_name], num_workers=num_workers, collate_fn=lambda x: x )
867
+ batched_data_iterator = iter(batched_data_loader)
 
 
 
868
 
869
  # train
870
  for step in tqdm(range(data_args.num_train_steps), desc="Training...", position=1, leave=False):
 
874
 
875
  except StopIteration:
876
  epoch += 1
877
+ # TODO - Not currently shuffled
878
+ batched_data_loader = torch.utils.data.DataLoader( batch_size=train_batch_size, dataset=vectorized_datasets[data_args.train_split_name], num_workers=num_workers, collate_fn=lambda x: x )
879
+ batched_data_iterator = iter(batched_data_loader)
880
  samples = next(batched_data_iterator)
881
 
882
  logger.info(
 
889
  batch = shard(batch.data)
890
  state, train_metric = p_train_step(state, batch)
891
 
892
+ train_time += time.time() - train_start
893
+ train_metric = unreplicate(train_metric)
894
+
895
  # ======================== Evaluating ==============================
896
  if step % training_args.eval_steps == 0 and step > 0:
897
+ batched_data_eval_loader = torch.utils.data.DataLoader( batch_size=eval_batch_size, dataset=vectorized_datasets[data_args.eval_split_name], num_workers=num_workers, collate_fn=lambda x: x )
898
+ batched_data_eval_iterator = iter(batched_data_eval_loader)
899
+
900
+ for _ in tqdm(range(data_args.max_eval_samples//eval_batch_size), desc="Evaluating...", position=2, leave=False):
901
  # Model forward
902
+ samples = next(batched_data_eval_iterator)
903
+ reshaped_samples = {key: [feature[key] for feature in samples] for key in samples[0].keys()}
904
+ batch = data_collator(reshaped_samples)
905
  labels = batch["labels"]
906
 
907
  metrics = pad_shard_unpad(p_eval_step, static_return=True)(
908
+ state.params, batch.data, min_device_batch=training_args.per_device_eval_batch_size
909
  )
910
  eval_metrics.append(metrics)
911
 
 
930
  desc = f"Epoch... ({epoch} | Eval Loss: {eval_metrics['loss']} | {wer_desc})"
931
  logger.info(desc)
932
 
933
+ # Save metrics
934
+ if has_tensorboard and jax.process_index() == 0:
935
+ #TODO
936
+ breakpoint()
937
+ # cur_step = epoch * (len(vectorized_datasets[data_args.train_split_name]) // train_batch_size)
938
+ write_metric(summary_writer, train_metrics, eval_metrics, train_time, data_args.num_train_steps)
939
+
940
+ # TODO THis is not happening at every epoch!!!
941
+ breakpoint()
942
+ # save checkpoint after each epoch and push checkpoint to the hub
943
+ if jax.process_index() == 0:
944
+ params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params))
945
+ model.save_pretrained(training_args.output_dir, params=params)
946
+ tokenizer.save_pretrained(training_args.output_dir)
947
+ if training_args.push_to_hub:
948
+ repo.push_to_hub(commit_message=f"Saving weights and logs of epoch {epoch}", blocking=False)
949
 
950
 
951
  if __name__ == "__main__":
run_test.sh → run_streaming.sh RENAMED
@@ -5,17 +5,18 @@ python run_flax_speech_recognition_seq2seq_streaming.py \
5
  --text_column_name sentence \
6
  --train_split_name test\
7
  --eval_split_name test\
8
- --output_dir whisper-tiny-ft-dummy\
9
  --overwrite_output_dir\
10
  --num_train_epochs=1\
11
  --warmup_steps=8 \
12
  --do_train \
13
  --do_eval \
14
- --num_train_steps 1000 \
15
- --eval_steps 100 \
 
16
  --learning_rate=2e-4 \
17
  --per_device_train_batch_size=2 \
18
- --per_device_eval_batch_size=4 \
19
  --predict_with_generate \
20
  --streaming=True \
21
 
 
5
  --text_column_name sentence \
6
  --train_split_name test\
7
  --eval_split_name test\
8
+ --output_dir whisper-small-flaxtest\
9
  --overwrite_output_dir\
10
  --num_train_epochs=1\
11
  --warmup_steps=8 \
12
  --do_train \
13
  --do_eval \
14
+ --num_train_steps 100 \
15
+ --max_eval_samples 100 \
16
+ --eval_steps 50 \
17
  --learning_rate=2e-4 \
18
  --per_device_train_batch_size=2 \
19
+ --per_device_eval_batch_size=2 \
20
  --predict_with_generate \
21
  --streaming=True \
22
 
whisper-small-flaxtest/added_tokens.json ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "<|af|>": 50326,
3
+ "<|am|>": 50333,
4
+ "<|ar|>": 50271,
5
+ "<|as|>": 50349,
6
+ "<|az|>": 50303,
7
+ "<|ba|>": 50354,
8
+ "<|be|>": 50329,
9
+ "<|bg|>": 50291,
10
+ "<|bn|>": 50301,
11
+ "<|bo|>": 50346,
12
+ "<|br|>": 50308,
13
+ "<|bs|>": 50314,
14
+ "<|ca|>": 50269,
15
+ "<|cs|>": 50282,
16
+ "<|cy|>": 50296,
17
+ "<|da|>": 50284,
18
+ "<|de|>": 50260,
19
+ "<|el|>": 50280,
20
+ "<|en|>": 50258,
21
+ "<|es|>": 50261,
22
+ "<|et|>": 50306,
23
+ "<|eu|>": 50309,
24
+ "<|fa|>": 50299,
25
+ "<|fi|>": 50276,
26
+ "<|fo|>": 50337,
27
+ "<|fr|>": 50264,
28
+ "<|gl|>": 50318,
29
+ "<|gu|>": 50332,
30
+ "<|haw|>": 50351,
31
+ "<|ha|>": 50353,
32
+ "<|hi|>": 50275,
33
+ "<|hr|>": 50290,
34
+ "<|ht|>": 50338,
35
+ "<|hu|>": 50285,
36
+ "<|hy|>": 50311,
37
+ "<|id|>": 50274,
38
+ "<|is|>": 50310,
39
+ "<|it|>": 50273,
40
+ "<|iw|>": 50278,
41
+ "<|ja|>": 50265,
42
+ "<|jw|>": 50355,
43
+ "<|ka|>": 50328,
44
+ "<|kk|>": 50315,
45
+ "<|km|>": 50322,
46
+ "<|kn|>": 50305,
47
+ "<|ko|>": 50263,
48
+ "<|la|>": 50293,
49
+ "<|lb|>": 50344,
50
+ "<|ln|>": 50352,
51
+ "<|lo|>": 50335,
52
+ "<|lt|>": 50292,
53
+ "<|lv|>": 50300,
54
+ "<|mg|>": 50348,
55
+ "<|mi|>": 50294,
56
+ "<|mk|>": 50307,
57
+ "<|ml|>": 50295,
58
+ "<|mn|>": 50313,
59
+ "<|mr|>": 50319,
60
+ "<|ms|>": 50281,
61
+ "<|mt|>": 50342,
62
+ "<|my|>": 50345,
63
+ "<|ne|>": 50312,
64
+ "<|nl|>": 50270,
65
+ "<|nn|>": 50341,
66
+ "<|nocaptions|>": 50361,
67
+ "<|notimestamps|>": 50362,
68
+ "<|no|>": 50287,
69
+ "<|oc|>": 50327,
70
+ "<|pa|>": 50320,
71
+ "<|pl|>": 50268,
72
+ "<|ps|>": 50339,
73
+ "<|pt|>": 50266,
74
+ "<|ro|>": 50283,
75
+ "<|ru|>": 50262,
76
+ "<|sa|>": 50343,
77
+ "<|sd|>": 50331,
78
+ "<|si|>": 50321,
79
+ "<|sk|>": 50297,
80
+ "<|sl|>": 50304,
81
+ "<|sn|>": 50323,
82
+ "<|so|>": 50325,
83
+ "<|sq|>": 50316,
84
+ "<|sr|>": 50302,
85
+ "<|startoflm|>": 50359,
86
+ "<|startofprev|>": 50360,
87
+ "<|startoftranscript|>": 50257,
88
+ "<|su|>": 50356,
89
+ "<|sv|>": 50272,
90
+ "<|sw|>": 50317,
91
+ "<|ta|>": 50286,
92
+ "<|te|>": 50298,
93
+ "<|tg|>": 50330,
94
+ "<|th|>": 50288,
95
+ "<|tk|>": 50340,
96
+ "<|tl|>": 50347,
97
+ "<|transcribe|>": 50358,
98
+ "<|translate|>": 50357,
99
+ "<|tr|>": 50267,
100
+ "<|tt|>": 50350,
101
+ "<|uk|>": 50279,
102
+ "<|ur|>": 50289,
103
+ "<|uz|>": 50336,
104
+ "<|vi|>": 50277,
105
+ "<|yi|>": 50334,
106
+ "<|yo|>": 50324,
107
+ "<|zh|>": 50259
108
+ }
whisper-small-flaxtest/config.json ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "openai/whisper-tiny.en",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "gelu",
5
+ "apply_spec_augment": false,
6
+ "architectures": [
7
+ "WhisperForConditionalGeneration"
8
+ ],
9
+ "attention_dropout": 0.0,
10
+ "begin_suppress_tokens": [
11
+ 220,
12
+ 50256
13
+ ],
14
+ "bos_token_id": 50257,
15
+ "d_model": 384,
16
+ "decoder_attention_heads": 6,
17
+ "decoder_ffn_dim": 1536,
18
+ "decoder_layerdrop": 0.0,
19
+ "decoder_layers": 4,
20
+ "decoder_start_token_id": 50257,
21
+ "dropout": 0.0,
22
+ "encoder_attention_heads": 6,
23
+ "encoder_ffn_dim": 1536,
24
+ "encoder_layerdrop": 0.0,
25
+ "encoder_layers": 4,
26
+ "eos_token_id": 50256,
27
+ "forced_decoder_ids": [
28
+ [
29
+ 1,
30
+ 50362
31
+ ]
32
+ ],
33
+ "init_std": 0.02,
34
+ "is_encoder_decoder": true,
35
+ "mask_feature_length": 10,
36
+ "mask_feature_min_masks": 0,
37
+ "mask_feature_prob": 0.0,
38
+ "mask_time_length": 10,
39
+ "mask_time_min_masks": 2,
40
+ "mask_time_prob": 0.05,
41
+ "max_length": 448,
42
+ "max_source_positions": 1500,
43
+ "max_target_positions": 448,
44
+ "model_type": "whisper",
45
+ "num_hidden_layers": 4,
46
+ "num_mel_bins": 80,
47
+ "pad_token_id": 50256,
48
+ "scale_embedding": false,
49
+ "suppress_tokens": [
50
+ 1,
51
+ 2,
52
+ 7,
53
+ 8,
54
+ 9,
55
+ 10,
56
+ 14,
57
+ 25,
58
+ 26,
59
+ 27,
60
+ 28,
61
+ 29,
62
+ 31,
63
+ 58,
64
+ 59,
65
+ 60,
66
+ 61,
67
+ 62,
68
+ 63,
69
+ 90,
70
+ 91,
71
+ 92,
72
+ 93,
73
+ 357,
74
+ 366,
75
+ 438,
76
+ 532,
77
+ 685,
78
+ 705,
79
+ 796,
80
+ 930,
81
+ 1058,
82
+ 1220,
83
+ 1267,
84
+ 1279,
85
+ 1303,
86
+ 1343,
87
+ 1377,
88
+ 1391,
89
+ 1635,
90
+ 1782,
91
+ 1875,
92
+ 2162,
93
+ 2361,
94
+ 2488,
95
+ 3467,
96
+ 4008,
97
+ 4211,
98
+ 4600,
99
+ 4808,
100
+ 5299,
101
+ 5855,
102
+ 6329,
103
+ 7203,
104
+ 9609,
105
+ 9959,
106
+ 10563,
107
+ 10786,
108
+ 11420,
109
+ 11709,
110
+ 11907,
111
+ 13163,
112
+ 13697,
113
+ 13700,
114
+ 14808,
115
+ 15306,
116
+ 16410,
117
+ 16791,
118
+ 17992,
119
+ 19203,
120
+ 19510,
121
+ 20724,
122
+ 22305,
123
+ 22935,
124
+ 27007,
125
+ 30109,
126
+ 30420,
127
+ 33409,
128
+ 34949,
129
+ 40283,
130
+ 40493,
131
+ 40549,
132
+ 47282,
133
+ 49146,
134
+ 50257,
135
+ 50359,
136
+ 50360,
137
+ 50361
138
+ ],
139
+ "torch_dtype": "float32",
140
+ "transformers_version": "4.27.0.dev0",
141
+ "use_cache": true,
142
+ "vocab_size": 51864
143
+ }
whisper-small-flaxtest/events.out.tfevents.1677611724.t1v-n-d163ce9a-w-0.1583171.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9277daa3c525efff75929a791e43739542c8b06300cd7e0063ffc45416db3592
3
+ size 40
whisper-small-flaxtest/events.out.tfevents.1677613551.t1v-n-d163ce9a-w-0.1702844.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:090e22643a37a0ccc28a8d5654c3d6c08569bfb99b5779934dad613905abe4bb
3
+ size 40
whisper-small-flaxtest/events.out.tfevents.1677613844.t1v-n-d163ce9a-w-0.1706687.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e1c74cb53e0effd274e51aae5374d6a418525fb8eb2933e9108f568203ddd44
3
+ size 40
whisper-small-flaxtest/events.out.tfevents.1677614511.t1v-n-d163ce9a-w-0.1904376.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bef180e271f9a1f610835e098dfe01c68fbe5b791708714f866eaeeae76c1730
3
+ size 40
whisper-small-flaxtest/events.out.tfevents.1677615119.t1v-n-d163ce9a-w-0.2101561.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d63e63d1335b2c7c223774e073f1ffeada56385d68fdbd3fe717767749d153b9
3
+ size 40
whisper-small-flaxtest/events.out.tfevents.1677615611.t1v-n-d163ce9a-w-0.2298739.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fe126e87c513b3ef6f7a5bd6333f15c9aeb960d710d91208127f0195a4d336e
3
+ size 40
whisper-small-flaxtest/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
whisper-small-flaxtest/normalizer.json ADDED
@@ -0,0 +1,1742 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "accessorise": "accessorize",
3
+ "accessorised": "accessorized",
4
+ "accessorises": "accessorizes",
5
+ "accessorising": "accessorizing",
6
+ "acclimatisation": "acclimatization",
7
+ "acclimatise": "acclimatize",
8
+ "acclimatised": "acclimatized",
9
+ "acclimatises": "acclimatizes",
10
+ "acclimatising": "acclimatizing",
11
+ "accoutrements": "accouterments",
12
+ "aeon": "eon",
13
+ "aeons": "eons",
14
+ "aerogramme": "aerogram",
15
+ "aerogrammes": "aerograms",
16
+ "aeroplane": "airplane",
17
+ "aeroplanes": "airplanes",
18
+ "aesthete": "esthete",
19
+ "aesthetes": "esthetes",
20
+ "aesthetic": "esthetic",
21
+ "aesthetically": "esthetically",
22
+ "aesthetics": "esthetics",
23
+ "aetiology": "etiology",
24
+ "ageing": "aging",
25
+ "aggrandisement": "aggrandizement",
26
+ "agonise": "agonize",
27
+ "agonised": "agonized",
28
+ "agonises": "agonizes",
29
+ "agonising": "agonizing",
30
+ "agonisingly": "agonizingly",
31
+ "almanack": "almanac",
32
+ "almanacks": "almanacs",
33
+ "aluminium": "aluminum",
34
+ "amortisable": "amortizable",
35
+ "amortisation": "amortization",
36
+ "amortisations": "amortizations",
37
+ "amortise": "amortize",
38
+ "amortised": "amortized",
39
+ "amortises": "amortizes",
40
+ "amortising": "amortizing",
41
+ "amphitheatre": "amphitheater",
42
+ "amphitheatres": "amphitheaters",
43
+ "anaemia": "anemia",
44
+ "anaemic": "anemic",
45
+ "anaesthesia": "anesthesia",
46
+ "anaesthetic": "anesthetic",
47
+ "anaesthetics": "anesthetics",
48
+ "anaesthetise": "anesthetize",
49
+ "anaesthetised": "anesthetized",
50
+ "anaesthetises": "anesthetizes",
51
+ "anaesthetising": "anesthetizing",
52
+ "anaesthetist": "anesthetist",
53
+ "anaesthetists": "anesthetists",
54
+ "anaesthetize": "anesthetize",
55
+ "anaesthetized": "anesthetized",
56
+ "anaesthetizes": "anesthetizes",
57
+ "anaesthetizing": "anesthetizing",
58
+ "analogue": "analog",
59
+ "analogues": "analogs",
60
+ "analyse": "analyze",
61
+ "analysed": "analyzed",
62
+ "analyses": "analyzes",
63
+ "analysing": "analyzing",
64
+ "anglicise": "anglicize",
65
+ "anglicised": "anglicized",
66
+ "anglicises": "anglicizes",
67
+ "anglicising": "anglicizing",
68
+ "annualised": "annualized",
69
+ "antagonise": "antagonize",
70
+ "antagonised": "antagonized",
71
+ "antagonises": "antagonizes",
72
+ "antagonising": "antagonizing",
73
+ "apologise": "apologize",
74
+ "apologised": "apologized",
75
+ "apologises": "apologizes",
76
+ "apologising": "apologizing",
77
+ "appal": "appall",
78
+ "appals": "appalls",
79
+ "appetiser": "appetizer",
80
+ "appetisers": "appetizers",
81
+ "appetising": "appetizing",
82
+ "appetisingly": "appetizingly",
83
+ "arbour": "arbor",
84
+ "arbours": "arbors",
85
+ "archaeologically": "archeologically",
86
+ "archaeologist": "archeologist",
87
+ "archaeologists": "archeologists",
88
+ "archaeology": "archeology",
89
+ "archeological": "archaeological",
90
+ "ardour": "ardor",
91
+ "armour": "armor",
92
+ "armoured": "armored",
93
+ "armourer": "armorer",
94
+ "armourers": "armorers",
95
+ "armouries": "armories",
96
+ "armoury": "armory",
97
+ "artefact": "artifact",
98
+ "artefacts": "artifacts",
99
+ "authorise": "authorize",
100
+ "authorised": "authorized",
101
+ "authorises": "authorizes",
102
+ "authorising": "authorizing",
103
+ "axe": "ax",
104
+ "backpedalled": "backpedaled",
105
+ "backpedalling": "backpedaling",
106
+ "bannister": "banister",
107
+ "bannisters": "banisters",
108
+ "baptise": "baptize",
109
+ "baptised": "baptized",
110
+ "baptises": "baptizes",
111
+ "baptising": "baptizing",
112
+ "bastardise": "bastardize",
113
+ "bastardised": "bastardized",
114
+ "bastardises": "bastardizes",
115
+ "bastardising": "bastardizing",
116
+ "battleax": "battleaxe",
117
+ "baulk": "balk",
118
+ "baulked": "balked",
119
+ "baulking": "balking",
120
+ "baulks": "balks",
121
+ "bedevilled": "bedeviled",
122
+ "bedevilling": "bedeviling",
123
+ "behaviour": "behavior",
124
+ "behavioural": "behavioral",
125
+ "behaviourism": "behaviorism",
126
+ "behaviourist": "behaviorist",
127
+ "behaviourists": "behaviorists",
128
+ "behaviours": "behaviors",
129
+ "behove": "behoove",
130
+ "behoved": "behooved",
131
+ "behoves": "behooves",
132
+ "bejewelled": "bejeweled",
133
+ "belabour": "belabor",
134
+ "belaboured": "belabored",
135
+ "belabouring": "belaboring",
136
+ "belabours": "belabors",
137
+ "bevelled": "beveled",
138
+ "bevvies": "bevies",
139
+ "bevvy": "bevy",
140
+ "biassed": "biased",
141
+ "biassing": "biasing",
142
+ "bingeing": "binging",
143
+ "bougainvillaea": "bougainvillea",
144
+ "bougainvillaeas": "bougainvilleas",
145
+ "bowdlerise": "bowdlerize",
146
+ "bowdlerised": "bowdlerized",
147
+ "bowdlerises": "bowdlerizes",
148
+ "bowdlerising": "bowdlerizing",
149
+ "breathalyse": "breathalyze",
150
+ "breathalysed": "breathalyzed",
151
+ "breathalyser": "breathalyzer",
152
+ "breathalysers": "breathalyzers",
153
+ "breathalyses": "breathalyzes",
154
+ "breathalysing": "breathalyzing",
155
+ "brutalise": "brutalize",
156
+ "brutalised": "brutalized",
157
+ "brutalises": "brutalizes",
158
+ "brutalising": "brutalizing",
159
+ "busses": "buses",
160
+ "bussing": "busing",
161
+ "caesarean": "cesarean",
162
+ "caesareans": "cesareans",
163
+ "calibre": "caliber",
164
+ "calibres": "calibers",
165
+ "calliper": "caliper",
166
+ "callipers": "calipers",
167
+ "callisthenics": "calisthenics",
168
+ "canalise": "canalize",
169
+ "canalised": "canalized",
170
+ "canalises": "canalizes",
171
+ "canalising": "canalizing",
172
+ "cancelation": "cancellation",
173
+ "cancelations": "cancellations",
174
+ "cancelled": "canceled",
175
+ "cancelling": "canceling",
176
+ "candour": "candor",
177
+ "cannibalise": "cannibalize",
178
+ "cannibalised": "cannibalized",
179
+ "cannibalises": "cannibalizes",
180
+ "cannibalising": "cannibalizing",
181
+ "canonise": "canonize",
182
+ "canonised": "canonized",
183
+ "canonises": "canonizes",
184
+ "canonising": "canonizing",
185
+ "capitalise": "capitalize",
186
+ "capitalised": "capitalized",
187
+ "capitalises": "capitalizes",
188
+ "capitalising": "capitalizing",
189
+ "caramelise": "caramelize",
190
+ "caramelised": "caramelized",
191
+ "caramelises": "caramelizes",
192
+ "caramelising": "caramelizing",
193
+ "carbonise": "carbonize",
194
+ "carbonised": "carbonized",
195
+ "carbonises": "carbonizes",
196
+ "carbonising": "carbonizing",
197
+ "carolled": "caroled",
198
+ "carolling": "caroling",
199
+ "catalogue": "catalog",
200
+ "catalogued": "cataloged",
201
+ "catalogues": "catalogs",
202
+ "cataloguing": "cataloging",
203
+ "catalyse": "catalyze",
204
+ "catalysed": "catalyzed",
205
+ "catalyses": "catalyzes",
206
+ "catalysing": "catalyzing",
207
+ "categorise": "categorize",
208
+ "categorised": "categorized",
209
+ "categorises": "categorizes",
210
+ "categorising": "categorizing",
211
+ "cauterise": "cauterize",
212
+ "cauterised": "cauterized",
213
+ "cauterises": "cauterizes",
214
+ "cauterising": "cauterizing",
215
+ "cavilled": "caviled",
216
+ "cavilling": "caviling",
217
+ "centigramme": "centigram",
218
+ "centigrammes": "centigrams",
219
+ "centilitre": "centiliter",
220
+ "centilitres": "centiliters",
221
+ "centimetre": "centimeter",
222
+ "centimetres": "centimeters",
223
+ "centralise": "centralize",
224
+ "centralised": "centralized",
225
+ "centralises": "centralizes",
226
+ "centralising": "centralizing",
227
+ "centre": "center",
228
+ "centred": "centered",
229
+ "centrefold": "centerfold",
230
+ "centrefolds": "centerfolds",
231
+ "centrepiece": "centerpiece",
232
+ "centrepieces": "centerpieces",
233
+ "centres": "centers",
234
+ "channelled": "channeled",
235
+ "channelling": "channeling",
236
+ "characterise": "characterize",
237
+ "characterised": "characterized",
238
+ "characterises": "characterizes",
239
+ "characterising": "characterizing",
240
+ "cheque": "check",
241
+ "chequebook": "checkbook",
242
+ "chequebooks": "checkbooks",
243
+ "chequered": "checkered",
244
+ "cheques": "checks",
245
+ "chilli": "chili",
246
+ "chimaera": "chimera",
247
+ "chimaeras": "chimeras",
248
+ "chiselled": "chiseled",
249
+ "chiselling": "chiseling",
250
+ "circularise": "circularize",
251
+ "circularised": "circularized",
252
+ "circularises": "circularizes",
253
+ "circularising": "circularizing",
254
+ "civilise": "civilize",
255
+ "civilised": "civilized",
256
+ "civilises": "civilizes",
257
+ "civilising": "civilizing",
258
+ "clamour": "clamor",
259
+ "clamoured": "clamored",
260
+ "clamouring": "clamoring",
261
+ "clamours": "clamors",
262
+ "clangour": "clangor",
263
+ "clarinettist": "clarinetist",
264
+ "clarinettists": "clarinetists",
265
+ "collectivise": "collectivize",
266
+ "collectivised": "collectivized",
267
+ "collectivises": "collectivizes",
268
+ "collectivising": "collectivizing",
269
+ "colonisation": "colonization",
270
+ "colonise": "colonize",
271
+ "colonised": "colonized",
272
+ "coloniser": "colonizer",
273
+ "colonisers": "colonizers",
274
+ "colonises": "colonizes",
275
+ "colonising": "colonizing",
276
+ "colour": "color",
277
+ "colourant": "colorant",
278
+ "colourants": "colorants",
279
+ "coloured": "colored",
280
+ "coloureds": "coloreds",
281
+ "colourful": "colorful",
282
+ "colourfully": "colorfully",
283
+ "colouring": "coloring",
284
+ "colourize": "colorize",
285
+ "colourized": "colorized",
286
+ "colourizes": "colorizes",
287
+ "colourizing": "colorizing",
288
+ "colourless": "colorless",
289
+ "colours": "colors",
290
+ "commercialise": "commercialize",
291
+ "commercialised": "commercialized",
292
+ "commercialises": "commercializes",
293
+ "commercialising": "commercializing",
294
+ "compartmentalise": "compartmentalize",
295
+ "compartmentalised": "compartmentalized",
296
+ "compartmentalises": "compartmentalizes",
297
+ "compartmentalising": "compartmentalizing",
298
+ "computerise": "computerize",
299
+ "computerised": "computerized",
300
+ "computerises": "computerizes",
301
+ "computerising": "computerizing",
302
+ "conceptualise": "conceptualize",
303
+ "conceptualised": "conceptualized",
304
+ "conceptualises": "conceptualizes",
305
+ "conceptualising": "conceptualizing",
306
+ "connexion": "connection",
307
+ "connexions": "connections",
308
+ "contextualise": "contextualize",
309
+ "contextualised": "contextualized",
310
+ "contextualises": "contextualizes",
311
+ "contextualising": "contextualizing",
312
+ "cosier": "cozier",
313
+ "cosies": "cozies",
314
+ "cosiest": "coziest",
315
+ "cosily": "cozily",
316
+ "cosiness": "coziness",
317
+ "cosy": "cozy",
318
+ "councillor": "councilor",
319
+ "councillors": "councilors",
320
+ "counselled": "counseled",
321
+ "counselling": "counseling",
322
+ "counsellor": "counselor",
323
+ "counsellors": "counselors",
324
+ "crenelated": "crenellated",
325
+ "criminalise": "criminalize",
326
+ "criminalised": "criminalized",
327
+ "criminalises": "criminalizes",
328
+ "criminalising": "criminalizing",
329
+ "criticise": "criticize",
330
+ "criticised": "criticized",
331
+ "criticises": "criticizes",
332
+ "criticising": "criticizing",
333
+ "crueller": "crueler",
334
+ "cruellest": "cruelest",
335
+ "crystallisation": "crystallization",
336
+ "crystallise": "crystallize",
337
+ "crystallised": "crystallized",
338
+ "crystallises": "crystallizes",
339
+ "crystallising": "crystallizing",
340
+ "cudgelled": "cudgeled",
341
+ "cudgelling": "cudgeling",
342
+ "customise": "customize",
343
+ "customised": "customized",
344
+ "customises": "customizes",
345
+ "customising": "customizing",
346
+ "cypher": "cipher",
347
+ "cyphers": "ciphers",
348
+ "decentralisation": "decentralization",
349
+ "decentralise": "decentralize",
350
+ "decentralised": "decentralized",
351
+ "decentralises": "decentralizes",
352
+ "decentralising": "decentralizing",
353
+ "decriminalisation": "decriminalization",
354
+ "decriminalise": "decriminalize",
355
+ "decriminalised": "decriminalized",
356
+ "decriminalises": "decriminalizes",
357
+ "decriminalising": "decriminalizing",
358
+ "defence": "defense",
359
+ "defenceless": "defenseless",
360
+ "defences": "defenses",
361
+ "dehumanisation": "dehumanization",
362
+ "dehumanise": "dehumanize",
363
+ "dehumanised": "dehumanized",
364
+ "dehumanises": "dehumanizes",
365
+ "dehumanising": "dehumanizing",
366
+ "demeanour": "demeanor",
367
+ "demilitarisation": "demilitarization",
368
+ "demilitarise": "demilitarize",
369
+ "demilitarised": "demilitarized",
370
+ "demilitarises": "demilitarizes",
371
+ "demilitarising": "demilitarizing",
372
+ "demobilisation": "demobilization",
373
+ "demobilise": "demobilize",
374
+ "demobilised": "demobilized",
375
+ "demobilises": "demobilizes",
376
+ "demobilising": "demobilizing",
377
+ "democratisation": "democratization",
378
+ "democratise": "democratize",
379
+ "democratised": "democratized",
380
+ "democratises": "democratizes",
381
+ "democratising": "democratizing",
382
+ "demonise": "demonize",
383
+ "demonised": "demonized",
384
+ "demonises": "demonizes",
385
+ "demonising": "demonizing",
386
+ "demoralisation": "demoralization",
387
+ "demoralise": "demoralize",
388
+ "demoralised": "demoralized",
389
+ "demoralises": "demoralizes",
390
+ "demoralising": "demoralizing",
391
+ "denationalisation": "denationalization",
392
+ "denationalise": "denationalize",
393
+ "denationalised": "denationalized",
394
+ "denationalises": "denationalizes",
395
+ "denationalising": "denationalizing",
396
+ "deodorise": "deodorize",
397
+ "deodorised": "deodorized",
398
+ "deodorises": "deodorizes",
399
+ "deodorising": "deodorizing",
400
+ "depersonalise": "depersonalize",
401
+ "depersonalised": "depersonalized",
402
+ "depersonalises": "depersonalizes",
403
+ "depersonalising": "depersonalizing",
404
+ "deputise": "deputize",
405
+ "deputised": "deputized",
406
+ "deputises": "deputizes",
407
+ "deputising": "deputizing",
408
+ "desensitisation": "desensitization",
409
+ "desensitise": "desensitize",
410
+ "desensitised": "desensitized",
411
+ "desensitises": "desensitizes",
412
+ "desensitising": "desensitizing",
413
+ "destabilisation": "destabilization",
414
+ "destabilise": "destabilize",
415
+ "destabilised": "destabilized",
416
+ "destabilises": "destabilizes",
417
+ "destabilising": "destabilizing",
418
+ "dialled": "dialed",
419
+ "dialling": "dialing",
420
+ "dialogue": "dialog",
421
+ "dialogues": "dialogs",
422
+ "diarrhoea": "diarrhea",
423
+ "digitise": "digitize",
424
+ "digitised": "digitized",
425
+ "digitises": "digitizes",
426
+ "digitising": "digitizing",
427
+ "disc": "disk",
428
+ "discolour": "discolor",
429
+ "discoloured": "discolored",
430
+ "discolouring": "discoloring",
431
+ "discolours": "discolors",
432
+ "discs": "disks",
433
+ "disembowelled": "disemboweled",
434
+ "disembowelling": "disemboweling",
435
+ "disfavour": "disfavor",
436
+ "dishevelled": "disheveled",
437
+ "dishonour": "dishonor",
438
+ "dishonourable": "dishonorable",
439
+ "dishonourably": "dishonorably",
440
+ "dishonoured": "dishonored",
441
+ "dishonouring": "dishonoring",
442
+ "dishonours": "dishonors",
443
+ "disorganisation": "disorganization",
444
+ "disorganised": "disorganized",
445
+ "distil": "distill",
446
+ "distils": "distills",
447
+ "dramatisation": "dramatization",
448
+ "dramatisations": "dramatizations",
449
+ "dramatise": "dramatize",
450
+ "dramatised": "dramatized",
451
+ "dramatises": "dramatizes",
452
+ "dramatising": "dramatizing",
453
+ "draught": "draft",
454
+ "draughtboard": "draftboard",
455
+ "draughtboards": "draftboards",
456
+ "draughtier": "draftier",
457
+ "draughtiest": "draftiest",
458
+ "draughts": "drafts",
459
+ "draughtsman": "draftsman",
460
+ "draughtsmanship": "draftsmanship",
461
+ "draughtsmen": "draftsmen",
462
+ "draughtswoman": "draftswoman",
463
+ "draughtswomen": "draftswomen",
464
+ "draughty": "drafty",
465
+ "drivelled": "driveled",
466
+ "drivelling": "driveling",
467
+ "duelled": "dueled",
468
+ "duelling": "dueling",
469
+ "economise": "economize",
470
+ "economised": "economized",
471
+ "economises": "economizes",
472
+ "economising": "economizing",
473
+ "editorialise": "editorialize",
474
+ "editorialised": "editorialized",
475
+ "editorialises": "editorializes",
476
+ "editorialising": "editorializing",
477
+ "edoema": "edema",
478
+ "empathise": "empathize",
479
+ "empathised": "empathized",
480
+ "empathises": "empathizes",
481
+ "empathising": "empathizing",
482
+ "emphasise": "emphasize",
483
+ "emphasised": "emphasized",
484
+ "emphasises": "emphasizes",
485
+ "emphasising": "emphasizing",
486
+ "enamelled": "enameled",
487
+ "enamelling": "enameling",
488
+ "enamoured": "enamored",
489
+ "encyclopaedia": "encyclopedia",
490
+ "encyclopaedias": "encyclopedias",
491
+ "encyclopaedic": "encyclopedic",
492
+ "endeavour": "endeavor",
493
+ "endeavoured": "endeavored",
494
+ "endeavouring": "endeavoring",
495
+ "endeavours": "endeavors",
496
+ "energise": "energize",
497
+ "energised": "energized",
498
+ "energises": "energizes",
499
+ "energising": "energizing",
500
+ "enrol": "enroll",
501
+ "enrols": "enrolls",
502
+ "enthral": "enthrall",
503
+ "enthrals": "enthralls",
504
+ "epaulette": "epaulet",
505
+ "epaulettes": "epaulets",
506
+ "epicentre": "epicenter",
507
+ "epicentres": "epicenters",
508
+ "epilogue": "epilog",
509
+ "epilogues": "epilogs",
510
+ "epitomise": "epitomize",
511
+ "epitomised": "epitomized",
512
+ "epitomises": "epitomizes",
513
+ "epitomising": "epitomizing",
514
+ "equalisation": "equalization",
515
+ "equalise": "equalize",
516
+ "equalised": "equalized",
517
+ "equaliser": "equalizer",
518
+ "equalisers": "equalizers",
519
+ "equalises": "equalizes",
520
+ "equalising": "equalizing",
521
+ "eulogise": "eulogize",
522
+ "eulogised": "eulogized",
523
+ "eulogises": "eulogizes",
524
+ "eulogising": "eulogizing",
525
+ "evangelise": "evangelize",
526
+ "evangelised": "evangelized",
527
+ "evangelises": "evangelizes",
528
+ "evangelising": "evangelizing",
529
+ "exorcise": "exorcize",
530
+ "exorcised": "exorcized",
531
+ "exorcises": "exorcizes",
532
+ "exorcising": "exorcizing",
533
+ "extemporisation": "extemporization",
534
+ "extemporise": "extemporize",
535
+ "extemporised": "extemporized",
536
+ "extemporises": "extemporizes",
537
+ "extemporising": "extemporizing",
538
+ "externalisation": "externalization",
539
+ "externalisations": "externalizations",
540
+ "externalise": "externalize",
541
+ "externalised": "externalized",
542
+ "externalises": "externalizes",
543
+ "externalising": "externalizing",
544
+ "factorise": "factorize",
545
+ "factorised": "factorized",
546
+ "factorises": "factorizes",
547
+ "factorising": "factorizing",
548
+ "faecal": "fecal",
549
+ "faeces": "feces",
550
+ "familiarisation": "familiarization",
551
+ "familiarise": "familiarize",
552
+ "familiarised": "familiarized",
553
+ "familiarises": "familiarizes",
554
+ "familiarising": "familiarizing",
555
+ "fantasise": "fantasize",
556
+ "fantasised": "fantasized",
557
+ "fantasises": "fantasizes",
558
+ "fantasising": "fantasizing",
559
+ "favour": "favor",
560
+ "favourable": "favorable",
561
+ "favourably": "favorably",
562
+ "favoured": "favored",
563
+ "favouring": "favoring",
564
+ "favourite": "favorite",
565
+ "favourites": "favorites",
566
+ "favouritism": "favoritism",
567
+ "favours": "favors",
568
+ "feminise": "feminize",
569
+ "feminised": "feminized",
570
+ "feminises": "feminizes",
571
+ "feminising": "feminizing",
572
+ "fertilisation": "fertilization",
573
+ "fertilise": "fertilize",
574
+ "fertilised": "fertilized",
575
+ "fertiliser": "fertilizer",
576
+ "fertilisers": "fertilizers",
577
+ "fertilises": "fertilizes",
578
+ "fertilising": "fertilizing",
579
+ "fervour": "fervor",
580
+ "fibre": "fiber",
581
+ "fibreglass": "fiberglass",
582
+ "fibres": "fibers",
583
+ "fictionalisation": "fictionalization",
584
+ "fictionalisations": "fictionalizations",
585
+ "fictionalise": "fictionalize",
586
+ "fictionalised": "fictionalized",
587
+ "fictionalises": "fictionalizes",
588
+ "fictionalising": "fictionalizing",
589
+ "fillet": "filet",
590
+ "filleted": "fileted",
591
+ "filleting": "fileting",
592
+ "fillets": "filets",
593
+ "finalisation": "finalization",
594
+ "finalise": "finalize",
595
+ "finalised": "finalized",
596
+ "finalises": "finalizes",
597
+ "finalising": "finalizing",
598
+ "flautist": "flutist",
599
+ "flautists": "flutists",
600
+ "flavour": "flavor",
601
+ "flavoured": "flavored",
602
+ "flavouring": "flavoring",
603
+ "flavourings": "flavorings",
604
+ "flavourless": "flavorless",
605
+ "flavours": "flavors",
606
+ "flavoursome": "flavorsome",
607
+ "flyer / flier": "flier / flyer",
608
+ "foetal": "fetal",
609
+ "foetid": "fetid",
610
+ "foetus": "fetus",
611
+ "foetuses": "fetuses",
612
+ "formalisation": "formalization",
613
+ "formalise": "formalize",
614
+ "formalised": "formalized",
615
+ "formalises": "formalizes",
616
+ "formalising": "formalizing",
617
+ "fossilisation": "fossilization",
618
+ "fossilise": "fossilize",
619
+ "fossilised": "fossilized",
620
+ "fossilises": "fossilizes",
621
+ "fossilising": "fossilizing",
622
+ "fraternisation": "fraternization",
623
+ "fraternise": "fraternize",
624
+ "fraternised": "fraternized",
625
+ "fraternises": "fraternizes",
626
+ "fraternising": "fraternizing",
627
+ "fulfil": "fulfill",
628
+ "fulfilment": "fulfillment",
629
+ "fulfils": "fulfills",
630
+ "funnelled": "funneled",
631
+ "funnelling": "funneling",
632
+ "gage": "gauge",
633
+ "gaged": "gauged",
634
+ "gages": "gauges",
635
+ "gaging": "gauging",
636
+ "galvanise": "galvanize",
637
+ "galvanised": "galvanized",
638
+ "galvanises": "galvanizes",
639
+ "galvanising": "galvanizing",
640
+ "gambolled": "gamboled",
641
+ "gambolling": "gamboling",
642
+ "gaol": "jail",
643
+ "gaolbird": "jailbird",
644
+ "gaolbirds": "jailbirds",
645
+ "gaolbreak": "jailbreak",
646
+ "gaolbreaks": "jailbreaks",
647
+ "gaoled": "jailed",
648
+ "gaoler": "jailer",
649
+ "gaolers": "jailers",
650
+ "gaoling": "jailing",
651
+ "gaols": "jails",
652
+ "gasses": "gases",
653
+ "generalisation": "generalization",
654
+ "generalisations": "generalizations",
655
+ "generalise": "generalize",
656
+ "generalised": "generalized",
657
+ "generalises": "generalizes",
658
+ "generalising": "generalizing",
659
+ "ghettoise": "ghettoize",
660
+ "ghettoised": "ghettoized",
661
+ "ghettoises": "ghettoizes",
662
+ "ghettoising": "ghettoizing",
663
+ "gipsies": "gypsies",
664
+ "glamor": "glamour",
665
+ "glamorise": "glamorize",
666
+ "glamorised": "glamorized",
667
+ "glamorises": "glamorizes",
668
+ "glamorising": "glamorizing",
669
+ "globalisation": "globalization",
670
+ "globalise": "globalize",
671
+ "globalised": "globalized",
672
+ "globalises": "globalizes",
673
+ "globalising": "globalizing",
674
+ "glueing": "gluing",
675
+ "goitre": "goiter",
676
+ "goitres": "goiters",
677
+ "gonorrhoea": "gonorrhea",
678
+ "gramme": "gram",
679
+ "grammes": "grams",
680
+ "gravelled": "graveled",
681
+ "grey": "gray",
682
+ "greyed": "grayed",
683
+ "greying": "graying",
684
+ "greyish": "grayish",
685
+ "greyness": "grayness",
686
+ "greys": "grays",
687
+ "grovelled": "groveled",
688
+ "grovelling": "groveling",
689
+ "groyne": "groin",
690
+ "groynes": "groins",
691
+ "gruelling": "grueling",
692
+ "gruellingly": "gruelingly",
693
+ "gryphon": "griffin",
694
+ "gryphons": "griffins",
695
+ "gynaecological": "gynecological",
696
+ "gynaecologist": "gynecologist",
697
+ "gynaecologists": "gynecologists",
698
+ "gynaecology": "gynecology",
699
+ "haematological": "hematological",
700
+ "haematologist": "hematologist",
701
+ "haematologists": "hematologists",
702
+ "haematology": "hematology",
703
+ "haemoglobin": "hemoglobin",
704
+ "haemophilia": "hemophilia",
705
+ "haemophiliac": "hemophiliac",
706
+ "haemophiliacs": "hemophiliacs",
707
+ "haemorrhage": "hemorrhage",
708
+ "haemorrhaged": "hemorrhaged",
709
+ "haemorrhages": "hemorrhages",
710
+ "haemorrhaging": "hemorrhaging",
711
+ "haemorrhoids": "hemorrhoids",
712
+ "harbour": "harbor",
713
+ "harboured": "harbored",
714
+ "harbouring": "harboring",
715
+ "harbours": "harbors",
716
+ "harmonisation": "harmonization",
717
+ "harmonise": "harmonize",
718
+ "harmonised": "harmonized",
719
+ "harmonises": "harmonizes",
720
+ "harmonising": "harmonizing",
721
+ "homoeopath": "homeopath",
722
+ "homoeopathic": "homeopathic",
723
+ "homoeopaths": "homeopaths",
724
+ "homoeopathy": "homeopathy",
725
+ "homogenise": "homogenize",
726
+ "homogenised": "homogenized",
727
+ "homogenises": "homogenizes",
728
+ "homogenising": "homogenizing",
729
+ "honour": "honor",
730
+ "honourable": "honorable",
731
+ "honourably": "honorably",
732
+ "honoured": "honored",
733
+ "honouring": "honoring",
734
+ "honours": "honors",
735
+ "hospitalisation": "hospitalization",
736
+ "hospitalise": "hospitalize",
737
+ "hospitalised": "hospitalized",
738
+ "hospitalises": "hospitalizes",
739
+ "hospitalising": "hospitalizing",
740
+ "humanise": "humanize",
741
+ "humanised": "humanized",
742
+ "humanises": "humanizes",
743
+ "humanising": "humanizing",
744
+ "humour": "humor",
745
+ "humoured": "humored",
746
+ "humouring": "humoring",
747
+ "humourless": "humorless",
748
+ "humours": "humors",
749
+ "hybridise": "hybridize",
750
+ "hybridised": "hybridized",
751
+ "hybridises": "hybridizes",
752
+ "hybridising": "hybridizing",
753
+ "hypnotise": "hypnotize",
754
+ "hypnotised": "hypnotized",
755
+ "hypnotises": "hypnotizes",
756
+ "hypnotising": "hypnotizing",
757
+ "hypothesise": "hypothesize",
758
+ "hypothesised": "hypothesized",
759
+ "hypothesises": "hypothesizes",
760
+ "hypothesising": "hypothesizing",
761
+ "idealisation": "idealization",
762
+ "idealise": "idealize",
763
+ "idealised": "idealized",
764
+ "idealises": "idealizes",
765
+ "idealising": "idealizing",
766
+ "idolise": "idolize",
767
+ "idolised": "idolized",
768
+ "idolises": "idolizes",
769
+ "idolising": "idolizing",
770
+ "immobilisation": "immobilization",
771
+ "immobilise": "immobilize",
772
+ "immobilised": "immobilized",
773
+ "immobiliser": "immobilizer",
774
+ "immobilisers": "immobilizers",
775
+ "immobilises": "immobilizes",
776
+ "immobilising": "immobilizing",
777
+ "immortalise": "immortalize",
778
+ "immortalised": "immortalized",
779
+ "immortalises": "immortalizes",
780
+ "immortalising": "immortalizing",
781
+ "immunisation": "immunization",
782
+ "immunise": "immunize",
783
+ "immunised": "immunized",
784
+ "immunises": "immunizes",
785
+ "immunising": "immunizing",
786
+ "impanelled": "impaneled",
787
+ "impanelling": "impaneling",
788
+ "imperilled": "imperiled",
789
+ "imperilling": "imperiling",
790
+ "individualise": "individualize",
791
+ "individualised": "individualized",
792
+ "individualises": "individualizes",
793
+ "individualising": "individualizing",
794
+ "industrialise": "industrialize",
795
+ "industrialised": "industrialized",
796
+ "industrialises": "industrializes",
797
+ "industrialising": "industrializing",
798
+ "inflexion": "inflection",
799
+ "inflexions": "inflections",
800
+ "initialise": "initialize",
801
+ "initialised": "initialized",
802
+ "initialises": "initializes",
803
+ "initialising": "initializing",
804
+ "initialled": "initialed",
805
+ "initialling": "initialing",
806
+ "instal": "install",
807
+ "instalment": "installment",
808
+ "instalments": "installments",
809
+ "instals": "installs",
810
+ "instil": "instill",
811
+ "instils": "instills",
812
+ "institutionalisation": "institutionalization",
813
+ "institutionalise": "institutionalize",
814
+ "institutionalised": "institutionalized",
815
+ "institutionalises": "institutionalizes",
816
+ "institutionalising": "institutionalizing",
817
+ "intellectualise": "intellectualize",
818
+ "intellectualised": "intellectualized",
819
+ "intellectualises": "intellectualizes",
820
+ "intellectualising": "intellectualizing",
821
+ "internalisation": "internalization",
822
+ "internalise": "internalize",
823
+ "internalised": "internalized",
824
+ "internalises": "internalizes",
825
+ "internalising": "internalizing",
826
+ "internationalisation": "internationalization",
827
+ "internationalise": "internationalize",
828
+ "internationalised": "internationalized",
829
+ "internationalises": "internationalizes",
830
+ "internationalising": "internationalizing",
831
+ "ionisation": "ionization",
832
+ "ionise": "ionize",
833
+ "ionised": "ionized",
834
+ "ioniser": "ionizer",
835
+ "ionisers": "ionizers",
836
+ "ionises": "ionizes",
837
+ "ionising": "ionizing",
838
+ "italicise": "italicize",
839
+ "italicised": "italicized",
840
+ "italicises": "italicizes",
841
+ "italicising": "italicizing",
842
+ "itemise": "itemize",
843
+ "itemised": "itemized",
844
+ "itemises": "itemizes",
845
+ "itemising": "itemizing",
846
+ "jeopardise": "jeopardize",
847
+ "jeopardised": "jeopardized",
848
+ "jeopardises": "jeopardizes",
849
+ "jeopardising": "jeopardizing",
850
+ "jewelled": "jeweled",
851
+ "jeweller": "jeweler",
852
+ "jewellers": "jewelers",
853
+ "jewellery": "jewelry",
854
+ "judgement": "judgment",
855
+ "kilogramme": "kilogram",
856
+ "kilogrammes": "kilograms",
857
+ "kilometre": "kilometer",
858
+ "kilometres": "kilometers",
859
+ "labelled": "labeled",
860
+ "labelling": "labeling",
861
+ "labour": "labor",
862
+ "laboured": "labored",
863
+ "labourer": "laborer",
864
+ "labourers": "laborers",
865
+ "labouring": "laboring",
866
+ "labours": "labors",
867
+ "lacklustre": "lackluster",
868
+ "legalisation": "legalization",
869
+ "legalise": "legalize",
870
+ "legalised": "legalized",
871
+ "legalises": "legalizes",
872
+ "legalising": "legalizing",
873
+ "legitimise": "legitimize",
874
+ "legitimised": "legitimized",
875
+ "legitimises": "legitimizes",
876
+ "legitimising": "legitimizing",
877
+ "leukaemia": "leukemia",
878
+ "levelled": "leveled",
879
+ "leveller": "leveler",
880
+ "levellers": "levelers",
881
+ "levelling": "leveling",
882
+ "libelled": "libeled",
883
+ "libelling": "libeling",
884
+ "libellous": "libelous",
885
+ "liberalisation": "liberalization",
886
+ "liberalise": "liberalize",
887
+ "liberalised": "liberalized",
888
+ "liberalises": "liberalizes",
889
+ "liberalising": "liberalizing",
890
+ "licence": "license",
891
+ "licenced": "licensed",
892
+ "licences": "licenses",
893
+ "licencing": "licensing",
894
+ "likeable": "likable",
895
+ "lionisation": "lionization",
896
+ "lionise": "lionize",
897
+ "lionised": "lionized",
898
+ "lionises": "lionizes",
899
+ "lionising": "lionizing",
900
+ "liquidise": "liquidize",
901
+ "liquidised": "liquidized",
902
+ "liquidiser": "liquidizer",
903
+ "liquidisers": "liquidizers",
904
+ "liquidises": "liquidizes",
905
+ "liquidising": "liquidizing",
906
+ "litre": "liter",
907
+ "litres": "liters",
908
+ "localise": "localize",
909
+ "localised": "localized",
910
+ "localises": "localizes",
911
+ "localising": "localizing",
912
+ "louvre": "louver",
913
+ "louvred": "louvered",
914
+ "louvres": "louvers",
915
+ "lustre": "luster",
916
+ "magnetise": "magnetize",
917
+ "magnetised": "magnetized",
918
+ "magnetises": "magnetizes",
919
+ "magnetising": "magnetizing",
920
+ "manoeuvrability": "maneuverability",
921
+ "manoeuvrable": "maneuverable",
922
+ "manoeuvre": "maneuver",
923
+ "manoeuvred": "maneuvered",
924
+ "manoeuvres": "maneuvers",
925
+ "manoeuvring": "maneuvering",
926
+ "manoeuvrings": "maneuverings",
927
+ "marginalisation": "marginalization",
928
+ "marginalise": "marginalize",
929
+ "marginalised": "marginalized",
930
+ "marginalises": "marginalizes",
931
+ "marginalising": "marginalizing",
932
+ "marshalled": "marshaled",
933
+ "marshalling": "marshaling",
934
+ "marvelled": "marveled",
935
+ "marvelling": "marveling",
936
+ "marvellous": "marvelous",
937
+ "marvellously": "marvelously",
938
+ "materialisation": "materialization",
939
+ "materialise": "materialize",
940
+ "materialised": "materialized",
941
+ "materialises": "materializes",
942
+ "materialising": "materializing",
943
+ "maximisation": "maximization",
944
+ "maximise": "maximize",
945
+ "maximised": "maximized",
946
+ "maximises": "maximizes",
947
+ "maximising": "maximizing",
948
+ "meagre": "meager",
949
+ "mechanisation": "mechanization",
950
+ "mechanise": "mechanize",
951
+ "mechanised": "mechanized",
952
+ "mechanises": "mechanizes",
953
+ "mechanising": "mechanizing",
954
+ "mediaeval": "medieval",
955
+ "memorialise": "memorialize",
956
+ "memorialised": "memorialized",
957
+ "memorialises": "memorializes",
958
+ "memorialising": "memorializing",
959
+ "memorise": "memorize",
960
+ "memorised": "memorized",
961
+ "memorises": "memorizes",
962
+ "memorising": "memorizing",
963
+ "mesmerise": "mesmerize",
964
+ "mesmerised": "mesmerized",
965
+ "mesmerises": "mesmerizes",
966
+ "mesmerising": "mesmerizing",
967
+ "metabolise": "metabolize",
968
+ "metabolised": "metabolized",
969
+ "metabolises": "metabolizes",
970
+ "metabolising": "metabolizing",
971
+ "metre": "meter",
972
+ "metres": "meters",
973
+ "mhm": "hmm",
974
+ "micrometre": "micrometer",
975
+ "micrometres": "micrometers",
976
+ "militarise": "militarize",
977
+ "militarised": "militarized",
978
+ "militarises": "militarizes",
979
+ "militarising": "militarizing",
980
+ "milligramme": "milligram",
981
+ "milligrammes": "milligrams",
982
+ "millilitre": "milliliter",
983
+ "millilitres": "milliliters",
984
+ "millimetre": "millimeter",
985
+ "millimetres": "millimeters",
986
+ "miniaturisation": "miniaturization",
987
+ "miniaturise": "miniaturize",
988
+ "miniaturised": "miniaturized",
989
+ "miniaturises": "miniaturizes",
990
+ "miniaturising": "miniaturizing",
991
+ "minibusses": "minibuses",
992
+ "minimise": "minimize",
993
+ "minimised": "minimized",
994
+ "minimises": "minimizes",
995
+ "minimising": "minimizing",
996
+ "misbehaviour": "misbehavior",
997
+ "misdemeanour": "misdemeanor",
998
+ "misdemeanours": "misdemeanors",
999
+ "misspelt": "misspelled",
1000
+ "mitre": "miter",
1001
+ "mitres": "miters",
1002
+ "mm": "hmm",
1003
+ "mmm": "hmm",
1004
+ "mobilisation": "mobilization",
1005
+ "mobilise": "mobilize",
1006
+ "mobilised": "mobilized",
1007
+ "mobilises": "mobilizes",
1008
+ "mobilising": "mobilizing",
1009
+ "modelled": "modeled",
1010
+ "modeller": "modeler",
1011
+ "modellers": "modelers",
1012
+ "modelling": "modeling",
1013
+ "modernise": "modernize",
1014
+ "modernised": "modernized",
1015
+ "modernises": "modernizes",
1016
+ "modernising": "modernizing",
1017
+ "moisturise": "moisturize",
1018
+ "moisturised": "moisturized",
1019
+ "moisturiser": "moisturizer",
1020
+ "moisturisers": "moisturizers",
1021
+ "moisturises": "moisturizes",
1022
+ "moisturising": "moisturizing",
1023
+ "monologue": "monolog",
1024
+ "monologues": "monologs",
1025
+ "monopolisation": "monopolization",
1026
+ "monopolise": "monopolize",
1027
+ "monopolised": "monopolized",
1028
+ "monopolises": "monopolizes",
1029
+ "monopolising": "monopolizing",
1030
+ "moralise": "moralize",
1031
+ "moralised": "moralized",
1032
+ "moralises": "moralizes",
1033
+ "moralising": "moralizing",
1034
+ "motorised": "motorized",
1035
+ "mould": "mold",
1036
+ "moulded": "molded",
1037
+ "moulder": "molder",
1038
+ "mouldered": "moldered",
1039
+ "mouldering": "moldering",
1040
+ "moulders": "molders",
1041
+ "mouldier": "moldier",
1042
+ "mouldiest": "moldiest",
1043
+ "moulding": "molding",
1044
+ "mouldings": "moldings",
1045
+ "moulds": "molds",
1046
+ "mouldy": "moldy",
1047
+ "moult": "molt",
1048
+ "moulted": "molted",
1049
+ "moulting": "molting",
1050
+ "moults": "molts",
1051
+ "moustache": "mustache",
1052
+ "moustached": "mustached",
1053
+ "moustaches": "mustaches",
1054
+ "moustachioed": "mustachioed",
1055
+ "multicoloured": "multicolored",
1056
+ "nationalisation": "nationalization",
1057
+ "nationalisations": "nationalizations",
1058
+ "nationalise": "nationalize",
1059
+ "nationalised": "nationalized",
1060
+ "nationalises": "nationalizes",
1061
+ "nationalising": "nationalizing",
1062
+ "naturalisation": "naturalization",
1063
+ "naturalise": "naturalize",
1064
+ "naturalised": "naturalized",
1065
+ "naturalises": "naturalizes",
1066
+ "naturalising": "naturalizing",
1067
+ "neighbour": "neighbor",
1068
+ "neighbourhood": "neighborhood",
1069
+ "neighbourhoods": "neighborhoods",
1070
+ "neighbouring": "neighboring",
1071
+ "neighbourliness": "neighborliness",
1072
+ "neighbourly": "neighborly",
1073
+ "neighbours": "neighbors",
1074
+ "neutralisation": "neutralization",
1075
+ "neutralise": "neutralize",
1076
+ "neutralised": "neutralized",
1077
+ "neutralises": "neutralizes",
1078
+ "neutralising": "neutralizing",
1079
+ "normalisation": "normalization",
1080
+ "normalise": "normalize",
1081
+ "normalised": "normalized",
1082
+ "normalises": "normalizes",
1083
+ "normalising": "normalizing",
1084
+ "odour": "odor",
1085
+ "odourless": "odorless",
1086
+ "odours": "odors",
1087
+ "oesophagus": "esophagus",
1088
+ "oesophaguses": "esophaguses",
1089
+ "oestrogen": "estrogen",
1090
+ "offence": "offense",
1091
+ "offences": "offenses",
1092
+ "omelette": "omelet",
1093
+ "omelettes": "omelets",
1094
+ "optimise": "optimize",
1095
+ "optimised": "optimized",
1096
+ "optimises": "optimizes",
1097
+ "optimising": "optimizing",
1098
+ "organisation": "organization",
1099
+ "organisational": "organizational",
1100
+ "organisations": "organizations",
1101
+ "organise": "organize",
1102
+ "organised": "organized",
1103
+ "organiser": "organizer",
1104
+ "organisers": "organizers",
1105
+ "organises": "organizes",
1106
+ "organising": "organizing",
1107
+ "orthopaedic": "orthopedic",
1108
+ "orthopaedics": "orthopedics",
1109
+ "ostracise": "ostracize",
1110
+ "ostracised": "ostracized",
1111
+ "ostracises": "ostracizes",
1112
+ "ostracising": "ostracizing",
1113
+ "outmanoeuvre": "outmaneuver",
1114
+ "outmanoeuvred": "outmaneuvered",
1115
+ "outmanoeuvres": "outmaneuvers",
1116
+ "outmanoeuvring": "outmaneuvering",
1117
+ "overemphasise": "overemphasize",
1118
+ "overemphasised": "overemphasized",
1119
+ "overemphasises": "overemphasizes",
1120
+ "overemphasising": "overemphasizing",
1121
+ "oxidisation": "oxidization",
1122
+ "oxidise": "oxidize",
1123
+ "oxidised": "oxidized",
1124
+ "oxidises": "oxidizes",
1125
+ "oxidising": "oxidizing",
1126
+ "paederast": "pederast",
1127
+ "paederasts": "pederasts",
1128
+ "paediatric": "pediatric",
1129
+ "paediatrician": "pediatrician",
1130
+ "paediatricians": "pediatricians",
1131
+ "paediatrics": "pediatrics",
1132
+ "paedophile": "pedophile",
1133
+ "paedophiles": "pedophiles",
1134
+ "paedophilia": "pedophilia",
1135
+ "palaeolithic": "paleolithic",
1136
+ "palaeontologist": "paleontologist",
1137
+ "palaeontologists": "paleontologists",
1138
+ "palaeontology": "paleontology",
1139
+ "panelled": "paneled",
1140
+ "panelling": "paneling",
1141
+ "panellist": "panelist",
1142
+ "panellists": "panelists",
1143
+ "paralyse": "paralyze",
1144
+ "paralysed": "paralyzed",
1145
+ "paralyses": "paralyzes",
1146
+ "paralysing": "paralyzing",
1147
+ "parcelled": "parceled",
1148
+ "parcelling": "parceling",
1149
+ "parlour": "parlor",
1150
+ "parlours": "parlors",
1151
+ "particularise": "particularize",
1152
+ "particularised": "particularized",
1153
+ "particularises": "particularizes",
1154
+ "particularising": "particularizing",
1155
+ "passivisation": "passivization",
1156
+ "passivise": "passivize",
1157
+ "passivised": "passivized",
1158
+ "passivises": "passivizes",
1159
+ "passivising": "passivizing",
1160
+ "pasteurisation": "pasteurization",
1161
+ "pasteurise": "pasteurize",
1162
+ "pasteurised": "pasteurized",
1163
+ "pasteurises": "pasteurizes",
1164
+ "pasteurising": "pasteurizing",
1165
+ "patronise": "patronize",
1166
+ "patronised": "patronized",
1167
+ "patronises": "patronizes",
1168
+ "patronising": "patronizing",
1169
+ "patronisingly": "patronizingly",
1170
+ "pedalled": "pedaled",
1171
+ "pedalling": "pedaling",
1172
+ "pedestrianisation": "pedestrianization",
1173
+ "pedestrianise": "pedestrianize",
1174
+ "pedestrianised": "pedestrianized",
1175
+ "pedestrianises": "pedestrianizes",
1176
+ "pedestrianising": "pedestrianizing",
1177
+ "penalise": "penalize",
1178
+ "penalised": "penalized",
1179
+ "penalises": "penalizes",
1180
+ "penalising": "penalizing",
1181
+ "pencilled": "penciled",
1182
+ "pencilling": "penciling",
1183
+ "personalise": "personalize",
1184
+ "personalised": "personalized",
1185
+ "personalises": "personalizes",
1186
+ "personalising": "personalizing",
1187
+ "pharmacopoeia": "pharmacopeia",
1188
+ "pharmacopoeias": "pharmacopeias",
1189
+ "philosophise": "philosophize",
1190
+ "philosophised": "philosophized",
1191
+ "philosophises": "philosophizes",
1192
+ "philosophising": "philosophizing",
1193
+ "philtre": "filter",
1194
+ "philtres": "filters",
1195
+ "phoney": "phony",
1196
+ "plagiarise": "plagiarize",
1197
+ "plagiarised": "plagiarized",
1198
+ "plagiarises": "plagiarizes",
1199
+ "plagiarising": "plagiarizing",
1200
+ "plough": "plow",
1201
+ "ploughed": "plowed",
1202
+ "ploughing": "plowing",
1203
+ "ploughman": "plowman",
1204
+ "ploughmen": "plowmen",
1205
+ "ploughs": "plows",
1206
+ "ploughshare": "plowshare",
1207
+ "ploughshares": "plowshares",
1208
+ "polarisation": "polarization",
1209
+ "polarise": "polarize",
1210
+ "polarised": "polarized",
1211
+ "polarises": "polarizes",
1212
+ "polarising": "polarizing",
1213
+ "politicisation": "politicization",
1214
+ "politicise": "politicize",
1215
+ "politicised": "politicized",
1216
+ "politicises": "politicizes",
1217
+ "politicising": "politicizing",
1218
+ "popularisation": "popularization",
1219
+ "popularise": "popularize",
1220
+ "popularised": "popularized",
1221
+ "popularises": "popularizes",
1222
+ "popularising": "popularizing",
1223
+ "pouffe": "pouf",
1224
+ "pouffes": "poufs",
1225
+ "practise": "practice",
1226
+ "practised": "practiced",
1227
+ "practises": "practices",
1228
+ "practising": "practicing",
1229
+ "praesidium": "presidium",
1230
+ "praesidiums": "presidiums",
1231
+ "pressurisation": "pressurization",
1232
+ "pressurise": "pressurize",
1233
+ "pressurised": "pressurized",
1234
+ "pressurises": "pressurizes",
1235
+ "pressurising": "pressurizing",
1236
+ "pretence": "pretense",
1237
+ "pretences": "pretenses",
1238
+ "primaeval": "primeval",
1239
+ "prioritisation": "prioritization",
1240
+ "prioritise": "prioritize",
1241
+ "prioritised": "prioritized",
1242
+ "prioritises": "prioritizes",
1243
+ "prioritising": "prioritizing",
1244
+ "privatisation": "privatization",
1245
+ "privatisations": "privatizations",
1246
+ "privatise": "privatize",
1247
+ "privatised": "privatized",
1248
+ "privatises": "privatizes",
1249
+ "privatising": "privatizing",
1250
+ "professionalisation": "professionalization",
1251
+ "professionalise": "professionalize",
1252
+ "professionalised": "professionalized",
1253
+ "professionalises": "professionalizes",
1254
+ "professionalising": "professionalizing",
1255
+ "programme": "program",
1256
+ "programmes": "programs",
1257
+ "prologue": "prolog",
1258
+ "prologues": "prologs",
1259
+ "propagandise": "propagandize",
1260
+ "propagandised": "propagandized",
1261
+ "propagandises": "propagandizes",
1262
+ "propagandising": "propagandizing",
1263
+ "proselytise": "proselytize",
1264
+ "proselytised": "proselytized",
1265
+ "proselytiser": "proselytizer",
1266
+ "proselytisers": "proselytizers",
1267
+ "proselytises": "proselytizes",
1268
+ "proselytising": "proselytizing",
1269
+ "psychoanalyse": "psychoanalyze",
1270
+ "psychoanalysed": "psychoanalyzed",
1271
+ "psychoanalyses": "psychoanalyzes",
1272
+ "psychoanalysing": "psychoanalyzing",
1273
+ "publicise": "publicize",
1274
+ "publicised": "publicized",
1275
+ "publicises": "publicizes",
1276
+ "publicising": "publicizing",
1277
+ "pulverisation": "pulverization",
1278
+ "pulverise": "pulverize",
1279
+ "pulverised": "pulverized",
1280
+ "pulverises": "pulverizes",
1281
+ "pulverising": "pulverizing",
1282
+ "pummelled": "pummeled",
1283
+ "pummelling": "pummeling",
1284
+ "pyjama": "pajama",
1285
+ "pyjamas": "pajamas",
1286
+ "pzazz": "pizzazz",
1287
+ "quarrelled": "quarreled",
1288
+ "quarrelling": "quarreling",
1289
+ "radicalise": "radicalize",
1290
+ "radicalised": "radicalized",
1291
+ "radicalises": "radicalizes",
1292
+ "radicalising": "radicalizing",
1293
+ "rancour": "rancor",
1294
+ "randomise": "randomize",
1295
+ "randomised": "randomized",
1296
+ "randomises": "randomizes",
1297
+ "randomising": "randomizing",
1298
+ "rationalisation": "rationalization",
1299
+ "rationalisations": "rationalizations",
1300
+ "rationalise": "rationalize",
1301
+ "rationalised": "rationalized",
1302
+ "rationalises": "rationalizes",
1303
+ "rationalising": "rationalizing",
1304
+ "ravelled": "raveled",
1305
+ "ravelling": "raveling",
1306
+ "realisable": "realizable",
1307
+ "realisation": "realization",
1308
+ "realisations": "realizations",
1309
+ "realise": "realize",
1310
+ "realised": "realized",
1311
+ "realises": "realizes",
1312
+ "realising": "realizing",
1313
+ "recognisable": "recognizable",
1314
+ "recognisably": "recognizably",
1315
+ "recognisance": "recognizance",
1316
+ "recognise": "recognize",
1317
+ "recognised": "recognized",
1318
+ "recognises": "recognizes",
1319
+ "recognising": "recognizing",
1320
+ "reconnoitre": "reconnoiter",
1321
+ "reconnoitred": "reconnoitered",
1322
+ "reconnoitres": "reconnoiters",
1323
+ "reconnoitring": "reconnoitering",
1324
+ "refuelled": "refueled",
1325
+ "refuelling": "refueling",
1326
+ "regularisation": "regularization",
1327
+ "regularise": "regularize",
1328
+ "regularised": "regularized",
1329
+ "regularises": "regularizes",
1330
+ "regularising": "regularizing",
1331
+ "remodelled": "remodeled",
1332
+ "remodelling": "remodeling",
1333
+ "remould": "remold",
1334
+ "remoulded": "remolded",
1335
+ "remoulding": "remolding",
1336
+ "remoulds": "remolds",
1337
+ "reorganisation": "reorganization",
1338
+ "reorganisations": "reorganizations",
1339
+ "reorganise": "reorganize",
1340
+ "reorganised": "reorganized",
1341
+ "reorganises": "reorganizes",
1342
+ "reorganising": "reorganizing",
1343
+ "revelled": "reveled",
1344
+ "reveller": "reveler",
1345
+ "revellers": "revelers",
1346
+ "revelling": "reveling",
1347
+ "revitalise": "revitalize",
1348
+ "revitalised": "revitalized",
1349
+ "revitalises": "revitalizes",
1350
+ "revitalising": "revitalizing",
1351
+ "revolutionise": "revolutionize",
1352
+ "revolutionised": "revolutionized",
1353
+ "revolutionises": "revolutionizes",
1354
+ "revolutionising": "revolutionizing",
1355
+ "rhapsodise": "rhapsodize",
1356
+ "rhapsodised": "rhapsodized",
1357
+ "rhapsodises": "rhapsodizes",
1358
+ "rhapsodising": "rhapsodizing",
1359
+ "rigour": "rigor",
1360
+ "rigours": "rigors",
1361
+ "ritualised": "ritualized",
1362
+ "rivalled": "rivaled",
1363
+ "rivalling": "rivaling",
1364
+ "romanticise": "romanticize",
1365
+ "romanticised": "romanticized",
1366
+ "romanticises": "romanticizes",
1367
+ "romanticising": "romanticizing",
1368
+ "rumour": "rumor",
1369
+ "rumoured": "rumored",
1370
+ "rumours": "rumors",
1371
+ "sabre": "saber",
1372
+ "sabres": "sabers",
1373
+ "saltpetre": "saltpeter",
1374
+ "sanitise": "sanitize",
1375
+ "sanitised": "sanitized",
1376
+ "sanitises": "sanitizes",
1377
+ "sanitising": "sanitizing",
1378
+ "satirise": "satirize",
1379
+ "satirised": "satirized",
1380
+ "satirises": "satirizes",
1381
+ "satirising": "satirizing",
1382
+ "saviour": "savior",
1383
+ "saviours": "saviors",
1384
+ "savour": "savor",
1385
+ "savoured": "savored",
1386
+ "savouries": "savories",
1387
+ "savouring": "savoring",
1388
+ "savours": "savors",
1389
+ "savoury": "savory",
1390
+ "scandalise": "scandalize",
1391
+ "scandalised": "scandalized",
1392
+ "scandalises": "scandalizes",
1393
+ "scandalising": "scandalizing",
1394
+ "sceptic": "skeptic",
1395
+ "sceptical": "skeptical",
1396
+ "sceptically": "skeptically",
1397
+ "scepticism": "skepticism",
1398
+ "sceptics": "skeptics",
1399
+ "sceptre": "scepter",
1400
+ "sceptres": "scepters",
1401
+ "scrutinise": "scrutinize",
1402
+ "scrutinised": "scrutinized",
1403
+ "scrutinises": "scrutinizes",
1404
+ "scrutinising": "scrutinizing",
1405
+ "secularisation": "secularization",
1406
+ "secularise": "secularize",
1407
+ "secularised": "secularized",
1408
+ "secularises": "secularizes",
1409
+ "secularising": "secularizing",
1410
+ "sensationalise": "sensationalize",
1411
+ "sensationalised": "sensationalized",
1412
+ "sensationalises": "sensationalizes",
1413
+ "sensationalising": "sensationalizing",
1414
+ "sensitise": "sensitize",
1415
+ "sensitised": "sensitized",
1416
+ "sensitises": "sensitizes",
1417
+ "sensitising": "sensitizing",
1418
+ "sentimentalise": "sentimentalize",
1419
+ "sentimentalised": "sentimentalized",
1420
+ "sentimentalises": "sentimentalizes",
1421
+ "sentimentalising": "sentimentalizing",
1422
+ "sepulchre": "sepulcher",
1423
+ "sepulchres": "sepulchers",
1424
+ "serialisation": "serialization",
1425
+ "serialisations": "serializations",
1426
+ "serialise": "serialize",
1427
+ "serialised": "serialized",
1428
+ "serialises": "serializes",
1429
+ "serialising": "serializing",
1430
+ "sermonise": "sermonize",
1431
+ "sermonised": "sermonized",
1432
+ "sermonises": "sermonizes",
1433
+ "sermonising": "sermonizing",
1434
+ "sheikh": "sheik",
1435
+ "shovelled": "shoveled",
1436
+ "shovelling": "shoveling",
1437
+ "shrivelled": "shriveled",
1438
+ "shrivelling": "shriveling",
1439
+ "signalise": "signalize",
1440
+ "signalised": "signalized",
1441
+ "signalises": "signalizes",
1442
+ "signalising": "signalizing",
1443
+ "signalled": "signaled",
1444
+ "signalling": "signaling",
1445
+ "smoulder": "smolder",
1446
+ "smouldered": "smoldered",
1447
+ "smouldering": "smoldering",
1448
+ "smoulders": "smolders",
1449
+ "snivelled": "sniveled",
1450
+ "snivelling": "sniveling",
1451
+ "snorkelled": "snorkeled",
1452
+ "snorkelling": "snorkeling",
1453
+ "snowplough": "snowplow",
1454
+ "snowploughs": "snowplows",
1455
+ "socialisation": "socialization",
1456
+ "socialise": "socialize",
1457
+ "socialised": "socialized",
1458
+ "socialises": "socializes",
1459
+ "socialising": "socializing",
1460
+ "sodomise": "sodomize",
1461
+ "sodomised": "sodomized",
1462
+ "sodomises": "sodomizes",
1463
+ "sodomising": "sodomizing",
1464
+ "solemnise": "solemnize",
1465
+ "solemnised": "solemnized",
1466
+ "solemnises": "solemnizes",
1467
+ "solemnising": "solemnizing",
1468
+ "sombre": "somber",
1469
+ "specialisation": "specialization",
1470
+ "specialisations": "specializations",
1471
+ "specialise": "specialize",
1472
+ "specialised": "specialized",
1473
+ "specialises": "specializes",
1474
+ "specialising": "specializing",
1475
+ "spectre": "specter",
1476
+ "spectres": "specters",
1477
+ "spiralled": "spiraled",
1478
+ "spiralling": "spiraling",
1479
+ "splendour": "splendor",
1480
+ "splendours": "splendors",
1481
+ "squirrelled": "squirreled",
1482
+ "squirrelling": "squirreling",
1483
+ "stabilisation": "stabilization",
1484
+ "stabilise": "stabilize",
1485
+ "stabilised": "stabilized",
1486
+ "stabiliser": "stabilizer",
1487
+ "stabilisers": "stabilizers",
1488
+ "stabilises": "stabilizes",
1489
+ "stabilising": "stabilizing",
1490
+ "standardisation": "standardization",
1491
+ "standardise": "standardize",
1492
+ "standardised": "standardized",
1493
+ "standardises": "standardizes",
1494
+ "standardising": "standardizing",
1495
+ "stencilled": "stenciled",
1496
+ "stencilling": "stenciling",
1497
+ "sterilisation": "sterilization",
1498
+ "sterilisations": "sterilizations",
1499
+ "sterilise": "sterilize",
1500
+ "sterilised": "sterilized",
1501
+ "steriliser": "sterilizer",
1502
+ "sterilisers": "sterilizers",
1503
+ "sterilises": "sterilizes",
1504
+ "sterilising": "sterilizing",
1505
+ "stigmatisation": "stigmatization",
1506
+ "stigmatise": "stigmatize",
1507
+ "stigmatised": "stigmatized",
1508
+ "stigmatises": "stigmatizes",
1509
+ "stigmatising": "stigmatizing",
1510
+ "storey": "story",
1511
+ "storeys": "stories",
1512
+ "subsidisation": "subsidization",
1513
+ "subsidise": "subsidize",
1514
+ "subsidised": "subsidized",
1515
+ "subsidiser": "subsidizer",
1516
+ "subsidisers": "subsidizers",
1517
+ "subsidises": "subsidizes",
1518
+ "subsidising": "subsidizing",
1519
+ "succour": "succor",
1520
+ "succoured": "succored",
1521
+ "succouring": "succoring",
1522
+ "succours": "succors",
1523
+ "sulphate": "sulfate",
1524
+ "sulphates": "sulfates",
1525
+ "sulphide": "sulfide",
1526
+ "sulphides": "sulfides",
1527
+ "sulphur": "sulfur",
1528
+ "sulphurous": "sulfurous",
1529
+ "summarise": "summarize",
1530
+ "summarised": "summarized",
1531
+ "summarises": "summarizes",
1532
+ "summarising": "summarizing",
1533
+ "swivelled": "swiveled",
1534
+ "swivelling": "swiveling",
1535
+ "symbolise": "symbolize",
1536
+ "symbolised": "symbolized",
1537
+ "symbolises": "symbolizes",
1538
+ "symbolising": "symbolizing",
1539
+ "sympathise": "sympathize",
1540
+ "sympathised": "sympathized",
1541
+ "sympathiser": "sympathizer",
1542
+ "sympathisers": "sympathizers",
1543
+ "sympathises": "sympathizes",
1544
+ "sympathising": "sympathizing",
1545
+ "synchronisation": "synchronization",
1546
+ "synchronise": "synchronize",
1547
+ "synchronised": "synchronized",
1548
+ "synchronises": "synchronizes",
1549
+ "synchronising": "synchronizing",
1550
+ "synthesise": "synthesize",
1551
+ "synthesised": "synthesized",
1552
+ "synthesiser": "synthesizer",
1553
+ "synthesisers": "synthesizers",
1554
+ "synthesises": "synthesizes",
1555
+ "synthesising": "synthesizing",
1556
+ "syphon": "siphon",
1557
+ "syphoned": "siphoned",
1558
+ "syphoning": "siphoning",
1559
+ "syphons": "siphons",
1560
+ "systematisation": "systematization",
1561
+ "systematise": "systematize",
1562
+ "systematised": "systematized",
1563
+ "systematises": "systematizes",
1564
+ "systematising": "systematizing",
1565
+ "tantalise": "tantalize",
1566
+ "tantalised": "tantalized",
1567
+ "tantalises": "tantalizes",
1568
+ "tantalising": "tantalizing",
1569
+ "tantalisingly": "tantalizingly",
1570
+ "tasselled": "tasseled",
1571
+ "technicolour": "technicolor",
1572
+ "temporise": "temporize",
1573
+ "temporised": "temporized",
1574
+ "temporises": "temporizes",
1575
+ "temporising": "temporizing",
1576
+ "tenderise": "tenderize",
1577
+ "tenderised": "tenderized",
1578
+ "tenderises": "tenderizes",
1579
+ "tenderising": "tenderizing",
1580
+ "terrorise": "terrorize",
1581
+ "terrorised": "terrorized",
1582
+ "terrorises": "terrorizes",
1583
+ "terrorising": "terrorizing",
1584
+ "theatre": "theater",
1585
+ "theatregoer": "theatergoer",
1586
+ "theatregoers": "theatergoers",
1587
+ "theatres": "theaters",
1588
+ "theorise": "theorize",
1589
+ "theorised": "theorized",
1590
+ "theorises": "theorizes",
1591
+ "theorising": "theorizing",
1592
+ "tonne": "ton",
1593
+ "tonnes": "tons",
1594
+ "towelled": "toweled",
1595
+ "towelling": "toweling",
1596
+ "toxaemia": "toxemia",
1597
+ "tranquillise": "tranquilize",
1598
+ "tranquillised": "tranquilized",
1599
+ "tranquilliser": "tranquilizer",
1600
+ "tranquillisers": "tranquilizers",
1601
+ "tranquillises": "tranquilizes",
1602
+ "tranquillising": "tranquilizing",
1603
+ "tranquillity": "tranquility",
1604
+ "tranquillize": "tranquilize",
1605
+ "tranquillized": "tranquilized",
1606
+ "tranquillizer": "tranquilizer",
1607
+ "tranquillizers": "tranquilizers",
1608
+ "tranquillizes": "tranquilizes",
1609
+ "tranquillizing": "tranquilizing",
1610
+ "tranquilly": "tranquility",
1611
+ "transistorised": "transistorized",
1612
+ "traumatise": "traumatize",
1613
+ "traumatised": "traumatized",
1614
+ "traumatises": "traumatizes",
1615
+ "traumatising": "traumatizing",
1616
+ "travelled": "traveled",
1617
+ "traveller": "traveler",
1618
+ "travellers": "travelers",
1619
+ "travelling": "traveling",
1620
+ "travelog": "travelogue",
1621
+ "travelogs": "travelogues",
1622
+ "trialled": "trialed",
1623
+ "trialling": "trialing",
1624
+ "tricolour": "tricolor",
1625
+ "tricolours": "tricolors",
1626
+ "trivialise": "trivialize",
1627
+ "trivialised": "trivialized",
1628
+ "trivialises": "trivializes",
1629
+ "trivialising": "trivializing",
1630
+ "tumour": "tumor",
1631
+ "tumours": "tumors",
1632
+ "tunnelled": "tunneled",
1633
+ "tunnelling": "tunneling",
1634
+ "tyrannise": "tyrannize",
1635
+ "tyrannised": "tyrannized",
1636
+ "tyrannises": "tyrannizes",
1637
+ "tyrannising": "tyrannizing",
1638
+ "tyre": "tire",
1639
+ "tyres": "tires",
1640
+ "unauthorised": "unauthorized",
1641
+ "uncivilised": "uncivilized",
1642
+ "underutilised": "underutilized",
1643
+ "unequalled": "unequaled",
1644
+ "unfavourable": "unfavorable",
1645
+ "unfavourably": "unfavorably",
1646
+ "unionisation": "unionization",
1647
+ "unionise": "unionize",
1648
+ "unionised": "unionized",
1649
+ "unionises": "unionizes",
1650
+ "unionising": "unionizing",
1651
+ "unorganised": "unorganized",
1652
+ "unravelled": "unraveled",
1653
+ "unravelling": "unraveling",
1654
+ "unrecognisable": "unrecognizable",
1655
+ "unrecognised": "unrecognized",
1656
+ "unrivalled": "unrivaled",
1657
+ "unsavoury": "unsavory",
1658
+ "untrammelled": "untrammeled",
1659
+ "urbanisation": "urbanization",
1660
+ "urbanise": "urbanize",
1661
+ "urbanised": "urbanized",
1662
+ "urbanises": "urbanizes",
1663
+ "urbanising": "urbanizing",
1664
+ "utilisable": "utilizable",
1665
+ "utilisation": "utilization",
1666
+ "utilise": "utilize",
1667
+ "utilised": "utilized",
1668
+ "utilises": "utilizes",
1669
+ "utilising": "utilizing",
1670
+ "valour": "valor",
1671
+ "vandalise": "vandalize",
1672
+ "vandalised": "vandalized",
1673
+ "vandalises": "vandalizes",
1674
+ "vandalising": "vandalizing",
1675
+ "vaporisation": "vaporization",
1676
+ "vaporise": "vaporize",
1677
+ "vaporised": "vaporized",
1678
+ "vaporises": "vaporizes",
1679
+ "vaporising": "vaporizing",
1680
+ "vapour": "vapor",
1681
+ "vapours": "vapors",
1682
+ "verbalise": "verbalize",
1683
+ "verbalised": "verbalized",
1684
+ "verbalises": "verbalizes",
1685
+ "verbalising": "verbalizing",
1686
+ "victimisation": "victimization",
1687
+ "victimise": "victimize",
1688
+ "victimised": "victimized",
1689
+ "victimises": "victimizes",
1690
+ "victimising": "victimizing",
1691
+ "videodisc": "videodisk",
1692
+ "videodiscs": "videodisks",
1693
+ "vigour": "vigor",
1694
+ "visualisation": "visualization",
1695
+ "visualisations": "visualizations",
1696
+ "visualise": "visualize",
1697
+ "visualised": "visualized",
1698
+ "visualises": "visualizes",
1699
+ "visualising": "visualizing",
1700
+ "vocalisation": "vocalization",
1701
+ "vocalisations": "vocalizations",
1702
+ "vocalise": "vocalize",
1703
+ "vocalised": "vocalized",
1704
+ "vocalises": "vocalizes",
1705
+ "vocalising": "vocalizing",
1706
+ "vulcanised": "vulcanized",
1707
+ "vulgarisation": "vulgarization",
1708
+ "vulgarise": "vulgarize",
1709
+ "vulgarised": "vulgarized",
1710
+ "vulgarises": "vulgarizes",
1711
+ "vulgarising": "vulgarizing",
1712
+ "waggon": "wagon",
1713
+ "waggons": "wagons",
1714
+ "watercolour": "watercolor",
1715
+ "watercolours": "watercolors",
1716
+ "weaselled": "weaseled",
1717
+ "weaselling": "weaseling",
1718
+ "westernisation": "westernization",
1719
+ "westernise": "westernize",
1720
+ "westernised": "westernized",
1721
+ "westernises": "westernizes",
1722
+ "westernising": "westernizing",
1723
+ "womanise": "womanize",
1724
+ "womanised": "womanized",
1725
+ "womaniser": "womanizer",
1726
+ "womanisers": "womanizers",
1727
+ "womanises": "womanizes",
1728
+ "womanising": "womanizing",
1729
+ "woollen": "woolen",
1730
+ "woollens": "woolens",
1731
+ "woollies": "woolies",
1732
+ "woolly": "wooly",
1733
+ "worshipped": "worshiped",
1734
+ "worshipper": "worshiper",
1735
+ "worshipping": "worshiping",
1736
+ "yodelled": "yodeled",
1737
+ "yodelling": "yodeling",
1738
+ "yoghourt": "yogurt",
1739
+ "yoghourts": "yogurts",
1740
+ "yoghurt": "yogurt",
1741
+ "yoghurts": "yogurts"
1742
+ }
whisper-small-flaxtest/preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "chunk_length": 30,
3
+ "feature_extractor_type": "WhisperFeatureExtractor",
4
+ "feature_size": 80,
5
+ "hop_length": 160,
6
+ "n_fft": 400,
7
+ "n_samples": 480000,
8
+ "nb_max_frames": 3000,
9
+ "padding_side": "right",
10
+ "padding_value": 0.0,
11
+ "processor_class": "WhisperProcessor",
12
+ "return_attention_mask": false,
13
+ "sampling_rate": 16000
14
+ }
whisper-small-flaxtest/special_tokens_map.json ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|startoftranscript|>",
4
+ "<|en|>",
5
+ "<|zh|>",
6
+ "<|de|>",
7
+ "<|es|>",
8
+ "<|ru|>",
9
+ "<|ko|>",
10
+ "<|fr|>",
11
+ "<|ja|>",
12
+ "<|pt|>",
13
+ "<|tr|>",
14
+ "<|pl|>",
15
+ "<|ca|>",
16
+ "<|nl|>",
17
+ "<|ar|>",
18
+ "<|sv|>",
19
+ "<|it|>",
20
+ "<|id|>",
21
+ "<|hi|>",
22
+ "<|fi|>",
23
+ "<|vi|>",
24
+ "<|iw|>",
25
+ "<|uk|>",
26
+ "<|el|>",
27
+ "<|ms|>",
28
+ "<|cs|>",
29
+ "<|ro|>",
30
+ "<|da|>",
31
+ "<|hu|>",
32
+ "<|ta|>",
33
+ "<|no|>",
34
+ "<|th|>",
35
+ "<|ur|>",
36
+ "<|hr|>",
37
+ "<|bg|>",
38
+ "<|lt|>",
39
+ "<|la|>",
40
+ "<|mi|>",
41
+ "<|ml|>",
42
+ "<|cy|>",
43
+ "<|sk|>",
44
+ "<|te|>",
45
+ "<|fa|>",
46
+ "<|lv|>",
47
+ "<|bn|>",
48
+ "<|sr|>",
49
+ "<|az|>",
50
+ "<|sl|>",
51
+ "<|kn|>",
52
+ "<|et|>",
53
+ "<|mk|>",
54
+ "<|br|>",
55
+ "<|eu|>",
56
+ "<|is|>",
57
+ "<|hy|>",
58
+ "<|ne|>",
59
+ "<|mn|>",
60
+ "<|bs|>",
61
+ "<|kk|>",
62
+ "<|sq|>",
63
+ "<|sw|>",
64
+ "<|gl|>",
65
+ "<|mr|>",
66
+ "<|pa|>",
67
+ "<|si|>",
68
+ "<|km|>",
69
+ "<|sn|>",
70
+ "<|yo|>",
71
+ "<|so|>",
72
+ "<|af|>",
73
+ "<|oc|>",
74
+ "<|ka|>",
75
+ "<|be|>",
76
+ "<|tg|>",
77
+ "<|sd|>",
78
+ "<|gu|>",
79
+ "<|am|>",
80
+ "<|yi|>",
81
+ "<|lo|>",
82
+ "<|uz|>",
83
+ "<|fo|>",
84
+ "<|ht|>",
85
+ "<|ps|>",
86
+ "<|tk|>",
87
+ "<|nn|>",
88
+ "<|mt|>",
89
+ "<|sa|>",
90
+ "<|lb|>",
91
+ "<|my|>",
92
+ "<|bo|>",
93
+ "<|tl|>",
94
+ "<|mg|>",
95
+ "<|as|>",
96
+ "<|tt|>",
97
+ "<|haw|>",
98
+ "<|ln|>",
99
+ "<|ha|>",
100
+ "<|ba|>",
101
+ "<|jw|>",
102
+ "<|su|>",
103
+ "<|translate|>",
104
+ "<|transcribe|>",
105
+ "<|startoflm|>",
106
+ "<|startofprev|>",
107
+ "<|nocaptions|>",
108
+ "<|notimestamps|>"
109
+ ],
110
+ "bos_token": "<|endoftext|>",
111
+ "eos_token": "<|endoftext|>",
112
+ "pad_token": "<|endoftext|>",
113
+ "unk_token": "<|endoftext|>"
114
+ }
whisper-small-flaxtest/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
whisper-small-flaxtest/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "eos_token": {
13
+ "__type": "AddedToken",
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "errors": "replace",
21
+ "model_max_length": 1024,
22
+ "pad_token": null,
23
+ "processor_class": "WhisperProcessor",
24
+ "return_attention_mask": false,
25
+ "special_tokens_map_file": null,
26
+ "tokenizer_class": "WhisperTokenizer",
27
+ "unk_token": {
28
+ "__type": "AddedToken",
29
+ "content": "<|endoftext|>",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false
34
+ }
35
+ }
whisper-small-flaxtest/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
whisper-tiny-ft-dummy/events.out.tfevents.1677582573.t1v-n-d163ce9a-w-0.1276805.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28df6cfd95c9539437e6d7457e72914758a48bc8c81c04d68d45c9beec6b5bdc
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677585137.t1v-n-d163ce9a-w-0.1284051.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0facb62340abd1a1ec68e3ddb84c1e26464d970168d3c5b8c3c52ec5d52de1d6
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677587059.t1v-n-d163ce9a-w-0.1287692.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e14a45dd8b6f3c888c4779ee8bcd6b20049ea8d45e1b0f6c34b7455dddfff433
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677587350.t1v-n-d163ce9a-w-0.1292303.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6378de6a369cc141269aaff4436e172c11d71a3b1dcc2d11cdae62da20d488c7
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677588068.t1v-n-d163ce9a-w-0.1297330.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92bbb8a3a7af5963003ed46c79ce553a9f98f9897c228c98e662440c90f1c8c5
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677588142.t1v-n-d163ce9a-w-0.1301760.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27b4453568339824f34c48944b0bf00ca2a4e603301b5d10dbafa527b557b776
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677588581.t1v-n-d163ce9a-w-0.1306471.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04d88ab6f9b46f027b704a597a8a2136d6260304a365e1c303e9269e2c9e5b2d
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677590425.t1v-n-d163ce9a-w-0.1318486.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3e2d7e93cfc0d7e463bbe781f6a502d64db4721d515e2ca070dd1673ecf39fc
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677590675.t1v-n-d163ce9a-w-0.1323104.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc61c0bcaa754613477811c72780874609f1047b22afc279f3ec8458d68cd1eb
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677591418.t1v-n-d163ce9a-w-0.1328351.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c6b869dc6ce4f75deb0966514f50eb606d7e808bcd6d51a9bd40f4b1f5902da
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677591678.t1v-n-d163ce9a-w-0.1333009.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0f75dd974d807c09a331f5b745d02ccf57c483cd88e094f12633f5a5acf2fa4
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677591869.t1v-n-d163ce9a-w-0.1337579.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76910deccce21a87d5b2b12eb9b71a6ffbf998d4dfb4f19f56f71e31b8067404
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677592021.t1v-n-d163ce9a-w-0.1344023.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8414ba6c82d77bad684b451c11194d51834d0917c9fd2ecf72b26e198d7b5408
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677592180.t1v-n-d163ce9a-w-0.1350466.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83b7ebb7a355c38ce543ddace67c17fcda3b39efad8637520028b7548de964ba
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677592712.t1v-n-d163ce9a-w-0.1355445.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7675eefbe2395c77c95b12e778c549db892e3a6849aac7022bb535c19fda1f17
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677596196.t1v-n-d163ce9a-w-0.1363328.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5280463efd0ffadd26520dc5af0cf8bc7272ce109d9c7cb5d130cbffaae90d76
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677596396.t1v-n-d163ce9a-w-0.1367877.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03b9ae6d9966b67a2ecb933764153693d2c2e8815ccd08203af297694d498819
3
+ size 40
whisper-tiny-ft-dummy/events.out.tfevents.1677596532.t1v-n-d163ce9a-w-0.1372356.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:851cba504d6263302d36e66de1f9ce7521b2bf5df056c0f22e320b2e9247c2a2
3
+ size 40