Auto-sync checkpoint during training

- log/log-train-2026-01-13-11-12-22-0 +107 -0
- log/log-train-2026-01-13-11-12-22-1 +107 -0
- log/log-train-2026-01-13-11-15-39 +168 -0
- tensorboard/events.out.tfevents.1768302742.8e64ffbd666a.72955.0 +3 -0
- tensorboard/events.out.tfevents.1768302742.8e64ffbd666a.72961.0 +3 -0
- tensorboard/events.out.tfevents.1768302939.8e64ffbd666a.73099.0 +3 -0
- tensorboard/events.out.tfevents.1768302939.8e64ffbd666a.73100.0 +3 -0
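The commit message indicates that training artifacts (logs, TensorBoard event files, checkpoints) are pushed to this repository periodically while the run is still in progress. As a rough illustration only, not the actual sync code behind this commit, such an auto-sync can be driven from the training loop with huggingface_hub; the repo id and call site below are placeholders.

# Illustrative sketch only: push the experiment directory to the Hub at a fixed
# interval while training runs. The repo id and paths are placeholders, and the
# real sync mechanism used for this commit is not shown in the diff.
from huggingface_hub import HfApi

api = HfApi()  # assumes the environment is already authenticated (HF token)

def sync_checkpoint(local_dir: str, repo_id: str) -> None:
    api.upload_folder(
        folder_path=local_dir,
        repo_id=repo_id,
        repo_type="model",
        commit_message="Auto-sync checkpoint during training",
    )

# Example call, e.g. from a background thread or after every save_every_n batches:
# sync_checkpoint("/kaggle/working/amharic_training/exp_amharic_streaming",
#                 "your-username/amharic-zipformer-streaming")  # placeholder repo id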
log/log-train-2026-01-13-11-12-22-0
ADDED
@@ -0,0 +1,107 @@
+2026-01-13 11:12:22,092 INFO [train.py:967] (0/2) Training started
+2026-01-13 11:12:22,093 INFO [train.py:977] (0/2) Device: cuda:0
+2026-01-13 11:12:22,096 INFO [train.py:986] (0/2) {
+"am_scale": 0.0,
+"attention_dims": "192,192,192,192,192",
+"average_period": 200,
+"base_lr": 0.05,
+"batch_idx_train": 0,
+"best_train_epoch": -1,
+"best_train_loss": Infinity,
+"best_valid_epoch": -1,
+"best_valid_loss": Infinity,
+"blank_id": 0,
+"bpe_model": "/kaggle/working/amharic_training/bpe/bpe.model",
+"bucketing_sampler": true,
+"cnn_module_kernels": "31,31,31,31,31",
+"concatenate_cuts": false,
+"context_size": 2,
+"decode_chunk_len": 32,
+"decoder_dim": 512,
+"drop_last": true,
+"duration_factor": 1.0,
+"enable_musan": false,
+"enable_spec_aug": true,
+"encoder_dims": "384,384,384,384,384",
+"encoder_unmasked_dims": "256,256,256,256,256",
+"env_info": {
+"IP address": "172.19.2.2",
+"hostname": "8e64ffbd666a",
+"icefall-git-branch": "master",
+"icefall-git-date": "Fri Nov 28 03:42:20 2025",
+"icefall-git-sha1": "0904e490-dirty",
+"icefall-path": "/kaggle/working/icefall",
+"k2-build-type": "Release",
+"k2-git-date": "Thu Jul 25 03:34:26 2024",
+"k2-git-sha1": "40e8d1676f6062e46458dc32ad21229c93cc9c50",
+"k2-path": "/usr/local/lib/python3.12/dist-packages/k2/__init__.py",
+"k2-version": "1.24.4",
+"k2-with-cuda": true,
+"lhotse-path": "/usr/local/lib/python3.12/dist-packages/lhotse/__init__.py",
+"lhotse-version": "1.32.1",
+"python-version": "3.12",
+"torch-cuda-available": true,
+"torch-cuda-version": "12.1",
+"torch-version": "2.4.0+cu121"
+},
+"exp_dir": "/kaggle/working/amharic_training/exp_amharic_streaming",
+"feature_dim": 80,
+"feedforward_dims": "1024,1024,2048,2048,1024",
+"full_libri": false,
+"gap": 1.0,
+"inf_check": false,
+"input_strategy": "PrecomputedFeatures",
+"joiner_dim": 512,
+"keep_last_k": 1,
+"lm_scale": 0.25,
+"log_interval": 50,
+"lr_batches": 5000,
+"lr_epochs": 3.5,
+"manifest_dir": "/kaggle/working/amharic_training/manifests",
+"master_port": 12354,
+"max_duration": 60,
+"mini_libri": false,
+"nhead": "8,8,8,8,8",
+"num_buckets": 30,
+"num_encoder_layers": "2,4,3,2,4",
+"num_epochs": 50,
+"num_left_chunks": 4,
+"num_workers": 2,
+"on_the_fly_feats": false,
+"print_diagnostics": false,
+"prune_range": 5,
+"reset_interval": 200,
+"return_cuts": true,
+"save_every_n": 1000,
+"seed": 42,
+"short_chunk_size": 50,
+"shuffle": true,
+"simple_loss_scale": 0.5,
+"spec_aug_time_warp_factor": 80,
+"start_batch": 0,
+"start_epoch": 1,
+"subsampling_factor": 4,
+"tensorboard": true,
+"use_fp16": true,
+"valid_interval": 1600,
+"vocab_size": 1000,
+"warm_step": 2000,
+"world_size": 2,
+"zipformer_downsampling_factors": "1,2,4,8,2"
+}
+2026-01-13 11:12:22,097 INFO [train.py:988] (0/2) About to create model
+2026-01-13 11:12:23,091 INFO [zipformer.py:405] (0/2) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+2026-01-13 11:12:23,126 INFO [train.py:992] (0/2) Number of model parameters: 71330891
+2026-01-13 11:12:24,149 INFO [train.py:1007] (0/2) Using DDP
+2026-01-13 11:12:27,182 INFO [asr_datamodule.py:422] (0/2) About to get train-clean-100 cuts
+2026-01-13 11:12:27,183 INFO [asr_datamodule.py:239] (0/2) Disable MUSAN
+2026-01-13 11:12:27,183 INFO [asr_datamodule.py:257] (0/2) Enable SpecAugment
+2026-01-13 11:12:27,183 INFO [asr_datamodule.py:258] (0/2) Time warp factor: 80
+2026-01-13 11:12:27,184 INFO [asr_datamodule.py:268] (0/2) Num frame mask: 10
+2026-01-13 11:12:27,184 INFO [asr_datamodule.py:281] (0/2) About to create train dataset
+2026-01-13 11:12:27,184 INFO [asr_datamodule.py:308] (0/2) Using DynamicBucketingSampler.
+2026-01-13 11:12:27,760 INFO [asr_datamodule.py:324] (0/2) About to create train dataloader
+2026-01-13 11:12:27,761 INFO [asr_datamodule.py:460] (0/2) About to get dev-clean cuts
+2026-01-13 11:12:27,761 INFO [asr_datamodule.py:467] (0/2) About to get dev-other cuts
+2026-01-13 11:12:27,762 INFO [asr_datamodule.py:355] (0/2) About to create dev dataset
+2026-01-13 11:12:28,087 INFO [asr_datamodule.py:372] (0/2) About to create dev dataloader
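The rank-0 log above reports 71,330,891 model parameters and that the model is wrapped in DDP across the two Kaggle GPUs (world_size 2, cuda:0 and cuda:1). A generic PyTorch sketch of those two steps, for orientation only; the recipe's actual train.py does more than this.

# Generic PyTorch sketch of the "Number of model parameters" and "Using DDP"
# steps; not the exact icefall code.
import torch
from torch.nn.parallel import DistributedDataParallel as DDP

def count_parameters(model: torch.nn.Module) -> int:
    return sum(p.numel() for p in model.parameters())  # 71330891 in this run

def wrap_ddp(model: torch.nn.Module, rank: int) -> torch.nn.Module:
    # assumes torch.distributed.init_process_group() has already been called,
    # with one process per GPU and world_size=2
    return DDP(model.to(rank), device_ids=[rank])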
log/log-train-2026-01-13-11-12-22-1
ADDED
@@ -0,0 +1,107 @@
+2026-01-13 11:12:22,255 INFO [train.py:967] (1/2) Training started
+2026-01-13 11:12:22,256 INFO [train.py:977] (1/2) Device: cuda:1
+2026-01-13 11:12:22,259 INFO [train.py:986] (1/2) {
+"am_scale": 0.0,
+"attention_dims": "192,192,192,192,192",
+"average_period": 200,
+"base_lr": 0.05,
+"batch_idx_train": 0,
+"best_train_epoch": -1,
+"best_train_loss": Infinity,
+"best_valid_epoch": -1,
+"best_valid_loss": Infinity,
+"blank_id": 0,
+"bpe_model": "/kaggle/working/amharic_training/bpe/bpe.model",
+"bucketing_sampler": true,
+"cnn_module_kernels": "31,31,31,31,31",
+"concatenate_cuts": false,
+"context_size": 2,
+"decode_chunk_len": 32,
+"decoder_dim": 512,
+"drop_last": true,
+"duration_factor": 1.0,
+"enable_musan": false,
+"enable_spec_aug": true,
+"encoder_dims": "384,384,384,384,384",
+"encoder_unmasked_dims": "256,256,256,256,256",
+"env_info": {
+"IP address": "172.19.2.2",
+"hostname": "8e64ffbd666a",
+"icefall-git-branch": "master",
+"icefall-git-date": "Fri Nov 28 03:42:20 2025",
+"icefall-git-sha1": "0904e490-dirty",
+"icefall-path": "/kaggle/working/icefall",
+"k2-build-type": "Release",
+"k2-git-date": "Thu Jul 25 03:34:26 2024",
+"k2-git-sha1": "40e8d1676f6062e46458dc32ad21229c93cc9c50",
+"k2-path": "/usr/local/lib/python3.12/dist-packages/k2/__init__.py",
+"k2-version": "1.24.4",
+"k2-with-cuda": true,
+"lhotse-path": "/usr/local/lib/python3.12/dist-packages/lhotse/__init__.py",
+"lhotse-version": "1.32.1",
+"python-version": "3.12",
+"torch-cuda-available": true,
+"torch-cuda-version": "12.1",
+"torch-version": "2.4.0+cu121"
+},
+"exp_dir": "/kaggle/working/amharic_training/exp_amharic_streaming",
+"feature_dim": 80,
+"feedforward_dims": "1024,1024,2048,2048,1024",
+"full_libri": false,
+"gap": 1.0,
+"inf_check": false,
+"input_strategy": "PrecomputedFeatures",
+"joiner_dim": 512,
+"keep_last_k": 1,
+"lm_scale": 0.25,
+"log_interval": 50,
+"lr_batches": 5000,
+"lr_epochs": 3.5,
+"manifest_dir": "/kaggle/working/amharic_training/manifests",
+"master_port": 12354,
+"max_duration": 60,
+"mini_libri": false,
+"nhead": "8,8,8,8,8",
+"num_buckets": 30,
+"num_encoder_layers": "2,4,3,2,4",
+"num_epochs": 50,
+"num_left_chunks": 4,
+"num_workers": 2,
+"on_the_fly_feats": false,
+"print_diagnostics": false,
+"prune_range": 5,
+"reset_interval": 200,
+"return_cuts": true,
+"save_every_n": 1000,
+"seed": 42,
+"short_chunk_size": 50,
+"shuffle": true,
+"simple_loss_scale": 0.5,
+"spec_aug_time_warp_factor": 80,
+"start_batch": 0,
+"start_epoch": 1,
+"subsampling_factor": 4,
+"tensorboard": true,
+"use_fp16": true,
+"valid_interval": 1600,
+"vocab_size": 1000,
+"warm_step": 2000,
+"world_size": 2,
+"zipformer_downsampling_factors": "1,2,4,8,2"
+}
+2026-01-13 11:12:22,260 INFO [train.py:988] (1/2) About to create model
+2026-01-13 11:12:23,319 INFO [zipformer.py:405] (1/2) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+2026-01-13 11:12:23,354 INFO [train.py:992] (1/2) Number of model parameters: 71330891
+2026-01-13 11:12:23,570 INFO [train.py:1007] (1/2) Using DDP
+2026-01-13 11:12:27,109 INFO [asr_datamodule.py:422] (1/2) About to get train-clean-100 cuts
+2026-01-13 11:12:27,110 INFO [asr_datamodule.py:239] (1/2) Disable MUSAN
+2026-01-13 11:12:27,111 INFO [asr_datamodule.py:257] (1/2) Enable SpecAugment
+2026-01-13 11:12:27,111 INFO [asr_datamodule.py:258] (1/2) Time warp factor: 80
+2026-01-13 11:12:27,111 INFO [asr_datamodule.py:268] (1/2) Num frame mask: 10
+2026-01-13 11:12:27,111 INFO [asr_datamodule.py:281] (1/2) About to create train dataset
+2026-01-13 11:12:27,111 INFO [asr_datamodule.py:308] (1/2) Using DynamicBucketingSampler.
+2026-01-13 11:12:27,697 INFO [asr_datamodule.py:324] (1/2) About to create train dataloader
+2026-01-13 11:12:27,697 INFO [asr_datamodule.py:460] (1/2) About to get dev-clean cuts
+2026-01-13 11:12:27,698 INFO [asr_datamodule.py:467] (1/2) About to get dev-other cuts
+2026-01-13 11:12:27,699 INFO [asr_datamodule.py:355] (1/2) About to create dev dataset
+2026-01-13 11:12:27,937 INFO [asr_datamodule.py:372] (1/2) About to create dev dataloader
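Both rank logs end with the Lhotse data pipeline being built: MUSAN mixing disabled, SpecAugment enabled (time warp factor 80, 10 frame masks), and a DynamicBucketingSampler over the training cuts with max_duration 60 and 30 buckets. A hedged sketch of what those lines roughly correspond to in the public Lhotse API; the recipe's asr_datamodule.py may pass additional arguments, and the manifest filename is not shown in the log.

# Hedged sketch of the data pipeline the asr_datamodule log lines describe,
# using the public Lhotse API; exact arguments in the recipe may differ.
import torch
from lhotse import load_manifest_lazy
from lhotse.dataset import (
    DynamicBucketingSampler,
    K2SpeechRecognitionDataset,
    SpecAugment,
)

# only manifest_dir appears in the config; the cuts filename is a placeholder
cuts = load_manifest_lazy("/kaggle/working/amharic_training/manifests/<train_cuts>.jsonl.gz")

sampler = DynamicBucketingSampler(
    cuts,
    max_duration=60,   # "max_duration": 60
    num_buckets=30,    # "num_buckets": 30
    shuffle=True,      # "shuffle": true
    drop_last=True,    # "drop_last": true
)
dataset = K2SpeechRecognitionDataset(
    return_cuts=True,  # "return_cuts": true
    input_transforms=[SpecAugment(time_warp_factor=80, num_frame_masks=10)],
)
loader = torch.utils.data.DataLoader(
    dataset, sampler=sampler, batch_size=None, num_workers=2
)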
log/log-train-2026-01-13-11-15-39
ADDED
@@ -0,0 +1,168 @@
+2026-01-13 11:15:39,636 INFO [train.py:967] Training started
+2026-01-13 11:15:39,637 INFO [train.py:977] Device: cuda:0
+2026-01-13 11:15:39,639 INFO [train.py:986] {
+"am_scale": 0.0,
+"attention_dims": "192,192,192,192,192",
+"average_period": 200,
+"base_lr": 0.05,
+"batch_idx_train": 0,
+"best_train_epoch": -1,
+"best_train_loss": Infinity,
+"best_valid_epoch": -1,
+"best_valid_loss": Infinity,
+"blank_id": 0,
+"bpe_model": "/kaggle/working/amharic_training/bpe/bpe.model",
+"bucketing_sampler": true,
+"cnn_module_kernels": "31,31,31,31,31",
+"concatenate_cuts": false,
+"context_size": 2,
+"decode_chunk_len": 32,
+"decoder_dim": 512,
+"drop_last": true,
+"duration_factor": 1.0,
+"enable_musan": false,
+"enable_spec_aug": true,
+"encoder_dims": "384,384,384,384,384",
+"encoder_unmasked_dims": "256,256,256,256,256",
+"env_info": {
+"IP address": "172.19.2.2",
+"hostname": "8e64ffbd666a",
+"icefall-git-branch": "master",
+"icefall-git-date": "Fri Nov 28 03:42:20 2025",
+"icefall-git-sha1": "0904e490-dirty",
+"icefall-path": "/kaggle/working/icefall",
+"k2-build-type": "Release",
+"k2-git-date": "Thu Jul 25 03:34:26 2024",
+"k2-git-sha1": "40e8d1676f6062e46458dc32ad21229c93cc9c50",
+"k2-path": "/usr/local/lib/python3.12/dist-packages/k2/__init__.py",
+"k2-version": "1.24.4",
+"k2-with-cuda": true,
+"lhotse-path": "/usr/local/lib/python3.12/dist-packages/lhotse/__init__.py",
+"lhotse-version": "1.32.1",
+"python-version": "3.12",
+"torch-cuda-available": true,
+"torch-cuda-version": "12.1",
+"torch-version": "2.4.0+cu121"
+},
+"exp_dir": "/kaggle/working/amharic_training/exp_amharic_streaming",
+"feature_dim": 80,
+"feedforward_dims": "1024,1024,2048,2048,1024",
+"full_libri": false,
+"gap": 1.0,
+"inf_check": false,
+"input_strategy": "PrecomputedFeatures",
+"joiner_dim": 512,
+"keep_last_k": 1,
+"lm_scale": 0.25,
+"log_interval": 50,
+"lr_batches": 5000,
+"lr_epochs": 3.5,
+"manifest_dir": "/kaggle/working/amharic_training/manifests",
+"master_port": 12354,
+"max_duration": 60,
+"mini_libri": false,
+"nhead": "8,8,8,8,8",
+"num_buckets": 30,
+"num_encoder_layers": "2,4,3,2,4",
+"num_epochs": 50,
+"num_left_chunks": 4,
+"num_workers": 2,
+"on_the_fly_feats": false,
+"print_diagnostics": false,
+"prune_range": 5,
+"reset_interval": 200,
+"return_cuts": true,
+"save_every_n": 1000,
+"seed": 42,
+"short_chunk_size": 50,
+"shuffle": true,
+"simple_loss_scale": 0.5,
+"spec_aug_time_warp_factor": 80,
+"start_batch": 0,
+"start_epoch": 1,
+"subsampling_factor": 4,
+"tensorboard": true,
+"use_fp16": true,
+"valid_interval": 1600,
+"vocab_size": 1000,
+"warm_step": 2000,
+"world_size": 1,
+"zipformer_downsampling_factors": "1,2,4,8,2"
+}
+2026-01-13 11:15:39,640 INFO [train.py:988] About to create model
+2026-01-13 11:15:40,251 INFO [zipformer.py:405] At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+2026-01-13 11:15:40,269 INFO [train.py:992] Number of model parameters: 71330891
+2026-01-13 11:15:42,539 INFO [asr_datamodule.py:422] About to get train-clean-100 cuts
+2026-01-13 11:15:42,540 INFO [asr_datamodule.py:239] Disable MUSAN
+2026-01-13 11:15:42,540 INFO [asr_datamodule.py:257] Enable SpecAugment
+2026-01-13 11:15:42,541 INFO [asr_datamodule.py:258] Time warp factor: 80
+2026-01-13 11:15:42,541 INFO [asr_datamodule.py:268] Num frame mask: 10
+2026-01-13 11:15:42,541 INFO [asr_datamodule.py:281] About to create train dataset
+2026-01-13 11:15:42,541 INFO [asr_datamodule.py:308] Using DynamicBucketingSampler.
+2026-01-13 11:15:42,849 INFO [asr_datamodule.py:324] About to create train dataloader
+2026-01-13 11:15:42,850 INFO [asr_datamodule.py:460] About to get dev-clean cuts
+2026-01-13 11:15:42,850 INFO [asr_datamodule.py:467] About to get dev-other cuts
+2026-01-13 11:15:42,851 INFO [asr_datamodule.py:355] About to create dev dataset
+2026-01-13 11:15:43,073 INFO [asr_datamodule.py:372] About to create dev dataloader
+2026-01-13 11:15:46,732 INFO [train.py:895] Epoch 1, batch 0, loss[loss=8.347, simple_loss=7.594, pruned_loss=7.506, over 1138.00 frames. ], tot_loss[loss=8.347, simple_loss=7.594, pruned_loss=7.506, over 1138.00 frames. ], batch size: 3, lr: 2.50e-02, grad_scale: 2.0
+2026-01-13 11:15:46,733 INFO [train.py:920] Computing validation loss
+2026-01-13 11:16:43,490 INFO [zipformer.py:2441] attn_weights_entropy = tensor([2.9199, 2.9204, 2.9209, 2.9167, 2.9196, 2.9205, 2.9204, 2.9204],
+device='cuda:0'), covar=tensor([0.0048, 0.0085, 0.0084, 0.0041, 0.0051, 0.0054, 0.0087, 0.0047],
+device='cuda:0'), in_proj_covar=tensor([0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009],
+device='cuda:0'), out_proj_covar=tensor([8.5571e-06, 8.6460e-06, 8.6548e-06, 8.5692e-06, 8.8457e-06, 8.6909e-06,
+8.7530e-06, 8.7241e-06], device='cuda:0')
+2026-01-13 11:17:23,271 INFO [zipformer.py:2441] attn_weights_entropy = tensor([4.2729, 4.2729, 4.2729, 4.2729, 4.2729, 4.2729, 4.2729, 4.2729],
+device='cuda:0'), covar=tensor([0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0001, 0.0003],
+device='cuda:0'), in_proj_covar=tensor([0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0008],
+device='cuda:0'), out_proj_covar=tensor([8.8449e-06, 8.8559e-06, 8.7936e-06, 8.6492e-06, 8.7990e-06, 8.7099e-06,
+8.5965e-06, 8.7138e-06], device='cuda:0')
+2026-01-13 11:17:38,423 INFO [zipformer.py:2441] attn_weights_entropy = tensor([3.5795, 3.5857, 3.5854, 3.5868, 3.5840, 3.5854, 3.5849, 3.5870],
+device='cuda:0'), covar=tensor([0.0071, 0.0040, 0.0091, 0.0069, 0.0067, 0.0080, 0.0082, 0.0090],
+device='cuda:0'), in_proj_covar=tensor([0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009],
+device='cuda:0'), out_proj_covar=tensor([8.7006e-06, 8.7710e-06, 8.6193e-06, 8.7975e-06, 8.6463e-06, 8.7048e-06,
+8.7000e-06, 8.8221e-06], device='cuda:0')
+2026-01-13 11:18:03,133 INFO [zipformer.py:2441] attn_weights_entropy = tensor([2.9240, 2.9246, 2.9252, 2.9183, 2.9235, 2.9247, 2.9246, 2.9246],
+device='cuda:0'), covar=tensor([0.0028, 0.0044, 0.0047, 0.0031, 0.0032, 0.0029, 0.0042, 0.0047],
+device='cuda:0'), in_proj_covar=tensor([0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009],
+device='cuda:0'), out_proj_covar=tensor([8.5571e-06, 8.6460e-06, 8.6548e-06, 8.5693e-06, 8.8457e-06, 8.6908e-06,
+8.7529e-06, 8.7241e-06], device='cuda:0')
+2026-01-13 11:18:31,124 INFO [train.py:929] Epoch 1, validation: loss=8.285, simple_loss=7.53, pruned_loss=7.544, over 824393.00 frames.
+2026-01-13 11:18:31,125 INFO [train.py:930] Maximum memory allocated so far is 2027MB
+2026-01-13 11:18:32,824 INFO [zipformer.py:1188] warmup_begin=3333.3, warmup_end=4000.0, batch_count=5.0, num_to_drop=2, layers_to_drop={0, 2}
+2026-01-13 11:18:40,224 INFO [zipformer.py:1188] warmup_begin=666.7, warmup_end=1333.3, batch_count=23.0, num_to_drop=1, layers_to_drop={0}
+2026-01-13 11:18:40,814 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=192, metric=12.00 vs. limit=2.0
+2026-01-13 11:18:51,813 INFO [train.py:895] Epoch 1, batch 50, loss[loss=1.06, simple_loss=0.9412, pruned_loss=1.061, over 1185.00 frames. ], tot_loss[loss=2.158, simple_loss=1.964, pruned_loss=1.872, over 59802.58 frames. ], batch size: 3, lr: 2.75e-02, grad_scale: 2.0
+2026-01-13 11:18:57,932 INFO [scaling.py:681] Whitening: num_groups=1, num_channels=384, metric=93.77 vs. limit=5.0
+2026-01-13 11:19:03,743 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=192, metric=20.91 vs. limit=2.0
+2026-01-13 11:19:05,183 INFO [zipformer.py:1188] warmup_begin=2666.7, warmup_end=3333.3, batch_count=83.0, num_to_drop=1, layers_to_drop={0}
+2026-01-13 11:19:11,196 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=96, metric=3.57 vs. limit=2.0
+2026-01-13 11:19:12,496 INFO [optim.py:365] Clipping_scale=2.0, grad-norm quartiles 1.111e+01 2.024e+01 2.936e+01 9.522e+01 1.063e+03, threshold=5.872e+01, percent-clipped=0.0
+2026-01-13 11:19:12,570 INFO [train.py:895] Epoch 1, batch 100, loss[loss=0.9899, simple_loss=0.866, pruned_loss=1.002, over 1447.00 frames. ], tot_loss[loss=1.58, simple_loss=1.422, pruned_loss=1.444, over 105356.72 frames. ], batch size: 4, lr: 3.00e-02, grad_scale: 2.0
+2026-01-13 11:19:24,054 INFO [scaling.py:681] Whitening: num_groups=1, num_channels=384, metric=30.26 vs. limit=
+2026-01-13 11:19:27,214 INFO [zipformer.py:2441] attn_weights_entropy = tensor([4.4010, 4.4031, 4.4032, 4.4032, 4.4031, 4.4031, 4.4032, 4.4030],
+device='cuda:0'), covar=tensor([1.2180e-04, 8.4329e-05, 8.6392e-05, 7.3779e-05, 2.2041e-04, 1.1491e-04,
+1.1872e-04, 9.2592e-05], device='cuda:0'), in_proj_covar=tensor([0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009],
+device='cuda:0'), out_proj_covar=tensor([9.2048e-06, 8.9728e-06, 8.9252e-06, 8.8810e-06, 8.9491e-06, 8.8957e-06,
+8.9637e-06, 9.0581e-06], device='cuda:
+2026-01-13 11:19:30,078 INFO [zipformer.py:1188] warmup_begin=3333.3, warmup_end=4000.0, batch_count=144.0, num_to_drop=2, layers_to_drop={0,
+2026-01-13 11:19:32,834 INFO [train.py:895] Epoch 1, batch 150, loss[loss=1.004, simple_loss=0.8657, pruned_loss=1.017, over 1195.00 frames. ], tot_loss[loss=1.38, simple_loss=1.228, pruned_loss=1.303, over 138827.38 frames. ], batch size: 3, lr: 3.25e-02, grad_scale:
+2026-01-13 11:19:43,151 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=96, metric=2.69 vs. limit=2.0
+2026-01-13 11:19:47,095 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=192, metric=3.28 vs. limit=
+2026-01-13 11:19:54,392 INFO [optim.py:365] Clipping_scale=2.0, grad-norm quartiles 1.323e+01 1.814e+01 2.145e+01 2.882e+01 9.701e+01, threshold=4.289e+01, percent-clipped=1.0
+2026-01-13 11:19:54,476 INFO [train.py:895] Epoch 1, batch 200, loss[loss=1.161, simple_loss=0.9868, pruned_loss=1.174, over 1328.00 frames. ], tot_loss[loss=1.254, simple_loss=1.102, pruned_loss=1.213, over 165766.02 frames. ], batch size: 8, lr: 3.50e-02, grad_scale
+2026-01-13 11:20:06,348 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=96, metric=2.11 vs. limit=2.0
+2026-01-13 11:20:15,135 INFO [train.py:895] Epoch 1, batch 250, loss[loss=1.047, simple_loss=0.8716, pruned_loss=1.08, over 1239.00 frames. ], tot_loss[loss=1.174, simple_loss=1.02, pruned_loss=1.153, over 187031.74 frames. ], batch size: 5, lr: 3.75e-02, grad_scale: 2.0
+2026-01-13 11:20:17,087 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=192, metric=2.97 vs. limit=2.0
+2026-01-13 11:20:21,996 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=96, metric=3.38 vs. limit=2.0
+2026-01-13 11:20:33,994 INFO [zipformer.py:1188] warmup_begin=2666.7, warmup_end=3333.3, batch_count=296.0, num_to_drop=1, layers_to_drop={0}
+2026-01-13 11:20:35,619 INFO [zipformer.py:1188] warmup_begin=1333.3, warmup_end=2000.0, batch_count=300.0, num_to_drop=2, layers_to_drop={0, 1}
+2026-01-13 11:20:35,721 INFO [scaling.py:681] Whitening: num_groups=1, num_channels=384, metric=175.27 vs. limit=5.0
+2026-01-13 11:20:35,874 INFO [optim.py:365] Clipping_scale=2.0, grad-norm quartiles 1.769e+01 2.261e+01 2.728e+01 3.480e+01 7.743e+01, threshold=5.457e+01, percent-clipped=15.0
+2026-01-13 11:20:35,952 INFO [train.py:895] Epoch 1, batch 300, loss[loss=0.8913, simple_loss=0.7456, pruned_loss=0.8574, over 1440.00 frames. ], tot_loss[loss=1.111, simple_loss=0.9562, pruned_loss=1.097, over 204097.56 frames. ], batch size: 5, lr: 4.00e-02, grad_scale: 2.0
+2026-01-13 11:20:41,005 INFO [zipformer.py:2441] attn_weights_entropy = tensor([3.2955, 3.2955, 3.2954, 3.2957, 3.2949, 3.2948, 3.2952, 3.2957],
+device='cuda:0'), covar=tensor([1.1076e-04, 1.1372e-04, 9.5709e-05, 1.0754e-04, 1.4747e-04, 1.1923e-04,
+1.1443e-04, 1.1999e-04], device='cuda:0'), in_proj_covar=tensor([0.0008, 0.0008, 0.0009, 0.0008, 0.0009, 0.0008, 0.0008, 0.0009],
+device='cuda:0'), out_proj_covar=tensor([8.1433e-06, 8.1599e-06, 8.3961e-06, 8.0358e-06, 8.4745e-06, 8.2381e-06,
+8.1010e-06, 8.32
+2026-01-13 11:20:43,371 IN
+2026-01-13 11:20:46,824 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=192, m
+2026-01-13 11:20:54,053 IN
+2026-01-13 11:20:56,384 INFO [train.py:895] Epoch 1, batch 350, loss[loss=0.8516, simple_loss=0.6946, pruned_loss=0.8449, over 1156.00 frames. ], tot_loss[loss=1.067, simple_loss=0.909, pruned_loss=1.051, over 216408.64 frames. ], batch size: 3, lr: 4.25e-02, grad_scale: 2.0
+2026-01-13 11:20:56,520 INFO [zipformer.py:2441] attn_weights_entropy = tensor([3.6183, 3
+2026-01-13 11:20:57,948 INFO [zipformer.py:1188] warmup_begin=3333.3, warmup_end=4000.0, batch_count=357.0, num_to_drop=2, layers_to_drop={1, 3}
+2026-01-13 11:21:01,766 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=96, metric=4.02 vs. limit=2.0
+2026-01-13 11:21:10,094 INFO [zipformer.py:1188] warmup_begin=2666.7, warmup_end=3333.3, batch_count=387.0, num_to_drop=1, layers_to_drop={1}
+2026-01-13 11:21:16,068 INFO [optim.py:365] Clipping_scale=2.0, grad-norm quartiles 1.651e+01 2.261e+01 2.593e+01 3.064e+01 1.629e+02, threshold=5.186e+01, percent-clipped=5.0
+2026-01-13 11:21:16,147 INFO [train.py:895] Epoch 1, batch 400, loss[loss=0.8826, simple_loss=0.7096, pruned_loss=0.8707, over 1354.00 frames. ], tot_loss[loss=1.038, simple_loss=0.8761
+2026-01-13 11:21:16,983 INFO [optim.py:365] Clipping_scale=2.0, grad-norm quartiles 1.765e+01 2.277e+01 2.714e+01 3.457e+01 7.515e+01, threshold=5.428e+01, percent-clipped=4.0
+2026-01-13 11:21:17,069 INFO [
+2026-01-13 11:21:19,307 INFO [scaling.py:681] Whitening: num_groups=1, num_channels=384, metric=57.39 vs. limit=5.0
+2026-01-13 11:21:25,974 INFO [scaling.py:681] Whitening: num_groups=1, num_channels=384, metric=38.46 vs. limit=5.0
+2026-01-13 11:21:26,311 INFO [scaling.py:681] Whitening: num_groups=1, num_channels=384, metric=63.52 vs. limit=5.0
+2026-01-13 11:21:27,726 INFO [scaling.py:681] Whitening: num_groups=8, num_channels=96, metric=3.00 vs. limit=2.0
+2026-01-13 11:
+2026-01-13 11:21:33,034 INFO [zipformer.py:1188] warmup_begin=1333.3, warmup_end=2000.0, batch_count=439.0, num_to_drop=2, layers_to_drop={0, 2}
+2026-01-13 11:21:36,909 INFO [zipformer.py:1188] warmup_begin=3333.3, warmup_end=4000.0, batch_count=448.0, num_to_drop=2, layers_to_drop={1, 3}
+2026-01-13 11:21:38,077 INFO [train.py:895] Epoch 1, batch 450, loss[loss=0.9799, simple_loss=0.7873, pruned_loss=0.9287, over 1340.00 frames. ], tot_loss[loss=1.02, simple_loss=0.853, pruned_loss=0.9918, over 233614.14 frames. ], batch size: 4, lr: 4.75e-02, grad_scale: 4.0
+batch_count=448.0, num_to_drop=2, layers_to_drop={0, 1}
+2026-01-13 11:21:36,807 INFO [train.py:895] Epoch 1, batch 450, loss[loss=0.9186, simple_loss=0.7324, pruned_loss=0.887, over 1332.00 frames. ], tot_loss[loss=1.02, simple_loss=0.8523, pruned_loss=0.9903, over 233825.80 frames. ], batch size: 4, lr: 4.75e-02, grad_scale: 4.0
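The third log appears to be a single-GPU restart of the same configuration (world_size is 1 here, and there is no DDP line) and shows mixed-precision training in action: "use_fp16": true in the config, a grad_scale that starts at 2.0 and reaches 4.0 by batch 450, and periodic gradient-norm clipping reports from optim.py. A generic PyTorch AMP sketch of what those numbers correspond to; icefall's actual optimizer and scaler handling is more involved.

# Generic torch.cuda.amp sketch; the recipe's real training step also computes
# the pruned transducer loss and uses its own optimizer, not shown here.
import torch

scaler = torch.cuda.amp.GradScaler(init_scale=2.0)  # log shows grad_scale: 2.0 early on

def train_step(model, optimizer, batch):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = model(batch)          # stand-in for the transducer loss computation
    scaler.scale(loss).backward()
    scaler.step(optimizer)           # the step is skipped if inf/NaN gradients are found
    scaler.update()                  # the scale grows over time (2.0 -> 4.0 by batch 450)
    return loss.detach()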
tensorboard/events.out.tfevents.1768302742.8e64ffbd666a.72955.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ce29a3b87fcb3eb332f40a0ceac3e052cca453bb7565b3ee5b1e35464ac78fc
+size 88
tensorboard/events.out.tfevents.1768302742.8e64ffbd666a.72961.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:708f41f566c3f32e2a9aebe2dc1230cf3f32e828b8900916fec5b8bc317c5b30
+size 88
tensorboard/events.out.tfevents.1768302939.8e64ffbd666a.73099.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:009de64d9947e993c86d0943b6de2d8e3283c94c1e100e9bce04aa63184a2e6a
+size 2171
tensorboard/events.out.tfevents.1768302939.8e64ffbd666a.73100.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c35e1721129a26ae993933db8a6a37e9a457eb31db0bba3cbd759e1e468f5460
+size 2171
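The four TensorBoard event files are committed as Git LFS pointers, so the diff only shows the pointer metadata (spec version, object hash, size) rather than the binary event data. One way to pull the actual files and inspect the run, assuming this repository's id (shown as a placeholder below) and that huggingface_hub and TensorBoard are installed:

# Fetch the tensorboard/ directory (LFS-backed files included) and point
# TensorBoard at it. The repo id below is a placeholder.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="your-username/amharic-zipformer-streaming",  # placeholder
    allow_patterns=["tensorboard/*"],
)
print(local_dir)
# then, from a shell: tensorboard --logdir <local_dir>/tensorboard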