pere committed on
Commit
c26c107
·
1 Parent(s): 2f98379
Files changed (1) hide show
  1. run_continue_nst.sh +5 -4
run_continue_nst.sh CHANGED
@@ -3,8 +3,8 @@
3
  # Reduce batch size and learning rate if training on smaller GPU
4
 
5
  python run_whisper_finetuning.py \
6
- --model_name_or_path="../whisper-NST" \
7
- --output_dir="../whisper-NST" \
8
  --overwrite_output_dir=True \
9
  --language="Norwegian" \
10
  --task="transcribe" \
@@ -14,9 +14,10 @@ python run_whisper_finetuning.py \
14
  --do_eval=True \
15
  --audio_column_name="audio" \
16
  --text_column_name="text" \
17
- --per_device_train_batch_size=24 \
18
- --per_device_train_batch_size=24 \
19
  --learning_rate=2e-5 \
 
20
  --warmup_steps=500 \
21
  --max_steps=10000 \
22
  --gradient_checkpointing=True \
 
3
  # Reduce batch size and learning rate if training on smaller GPU
4
 
5
  python run_whisper_finetuning.py \
6
+ --model_name_or_path="openai/whisper-small" \
7
+ --output_dir="../whisper-NST-cons2e5" \
8
  --overwrite_output_dir=True \
9
  --language="Norwegian" \
10
  --task="transcribe" \
 
14
  --do_eval=True \
15
  --audio_column_name="audio" \
16
  --text_column_name="text" \
17
+ --per_device_train_batch_size=16 \
18
+ --per_device_eval_batch_size=16 \
19
  --learning_rate=2e-5 \
20
+ --lr_scheduler_type="constant_with_warmup" \
21
  --warmup_steps=500 \
22
  --max_steps=10000 \
23
  --gradient_checkpointing=True \