patrickvonplaten committed on
Commit
83832cf
·
1 Parent(s): d02232c
Files changed (4) hide show
  1. config.json +2 -4
  2. ds_config_wav2vec2_zero2.json +51 -0
  3. run_main.sh +22 -14
  4. run_pretrain.py +1 -0
config.json CHANGED
@@ -1,6 +1,5 @@
1
  {
2
  "activation_dropout": 0.0,
3
- "apply_spec_augment": true,
4
  "architectures": [
5
  "Wav2Vec2ForPreTraining"
6
  ],
@@ -73,7 +72,6 @@
73
  "num_hidden_layers": 12,
74
  "num_negatives": 100,
75
  "pad_token_id": 0,
76
- "proj_codevector_dim": 256,
77
- "transformers_version": "4.10.0.dev0",
78
- "vocab_size": 32
79
  }
 
1
  {
2
  "activation_dropout": 0.0,
 
3
  "architectures": [
4
  "Wav2Vec2ForPreTraining"
5
  ],
 
72
  "num_hidden_layers": 12,
73
  "num_negatives": 100,
74
  "pad_token_id": 0,
75
+ "proj_codevector_dim": 768,
76
+ "transformers_version": "4.10.0.dev0"
 
77
  }
ds_config_wav2vec2_zero2.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "fp16": {
3
+ "enabled": "auto",
4
+ "loss_scale": 0,
5
+ "loss_scale_window": 1000,
6
+ "initial_scale_power": 16,
7
+ "hysteresis": 2,
8
+ "min_loss_scale": 1
9
+ },
10
+
11
+ "optimizer": {
12
+ "type": "AdamW",
13
+ "params": {
14
+ "lr": "auto",
15
+ "betas": "auto",
16
+ "eps": "auto",
17
+ "weight_decay": "auto"
18
+ }
19
+ },
20
+
21
+ "scheduler": {
22
+ "type": "WarmupLR",
23
+ "params": {
24
+ "warmup_min_lr": "auto",
25
+ "warmup_max_lr": "auto",
26
+ "warmup_num_steps": "auto"
27
+ }
28
+ },
29
+
30
+ "zero_optimization": {
31
+ "stage": 2,
32
+ "offload_optimizer": {
33
+ "device": "cpu",
34
+ "pin_memory": true
35
+ },
36
+ "find_unused_parameters": true,
37
+ "allgather_partitions": true,
38
+ "allgather_bucket_size": 2e8,
39
+ "overlap_comm": true,
40
+ "reduce_scatter": true,
41
+ "reduce_bucket_size": 2e8,
42
+ "contiguous_gradients": true
43
+ },
44
+
45
+ "gradient_accumulation_steps": "auto",
46
+ "gradient_clipping": "auto",
47
+ "steps_per_print": 2000,
48
+ "train_batch_size": "auto",
49
+ "train_micro_batch_size_per_gpu": "auto",
50
+ "wall_clock_breakdown": false
51
+ }
run_main.sh CHANGED
@@ -1,15 +1,23 @@
1
  #!/usr/bin/env bash
2
- ./run_wav2vec2_pretrain_flax.py \
3
- --output_dir="./" \
4
- --num_train_epochs="2" \
5
- --dataset_name="patrickvonplaten/librispeech_local" \
6
- --dataset_config_name="clean" \
7
- --train_split_name="train" \
8
- --per_device_train_batch_size="16" \
9
- --per_device_eval_batch_size="16" \
10
- --learning_rate="5e-3" \
11
- --weight_decay="0.01" \
12
- --warmup_steps="1000" \
13
- --model_name_or_path="./" \
14
- --pad_to_multiple_of="16384" \
15
- --logging_steps="2" \
 
 
 
 
 
 
 
 
 
1
  #!/usr/bin/env bash
2
+ PYTHONPATH=/home/patrick/hugging_face/transformers/src deepspeed --num_gpus 4
3
+ ./run_pretrain.py \
4
+ --output_dir="./test" \
5
+ --num_train_epochs="20" \
6
+ --per_device_train_batch_size="32" \
7
+ --per_device_eval_batch_size="32" \
8
+ --gradient_accumulation_steps="2" \
9
+ --save_total_limit="3" \
10
+ --save_steps="500" \
11
+ --logging_steps="10" \
12
+ --learning_rate="5e-3" \
13
+ --weight_decay="0.01" \
14
+ --warmup_steps="3000" \
15
+ --model_name_or_path="patrickvonplaten/wav2vec2-base-libri-100h" \
16
+ --dataset_name="patrickvonplaten/librispeech_local" \
17
+ --dataset_config_name="clean" \
18
+ --train_split_name="train" \
19
+ --preprocessing_num_workers="4" \
20
+ --verbose_logging \
21
+ --fp16 \
22
+ --deepspeed ds_config_wav2vec2_zero2.json \
23
+ # --group_by_length \
run_pretrain.py ADDED
@@ -0,0 +1 @@
 
 
1
+ /home/patrick/hugging_face/transformers/examples/research_projects/wav2vec2/run_pretrain.py