Baselhany committed on
Commit
0f6a110
·
verified ·
1 Parent(s): 58ca4ff

Training in progress, step 13600, checkpoint

Browse files
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5ac4290640598dceb77135310af03477fc527a08702173117820a3f0b608a550
3
  size 223144592
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7060279d45e7459b967e15db711d8cd9a1a17424e52d7e90ff35cb16d2ba87fb
3
  size 223144592
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6223c792fe2fedd67376643c060243639c17fd0e7777012ac3b19b15717ca7b5
3
  size 281574266
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79848332ea3785016fa03281822579bfa2a3e4281c3070d1112792454b5a75b5
3
  size 281574266
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2a3c6d4339d07e8ddf01aaa0b578cae9c13ec8ba232b16c8c20e6158df854a4f
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c380529cf06397e865a2ec4a90a8b0334d0b540ca21dea720996a4d58cbfe97
3
  size 14244
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7c0e095caeca862fdaf1f2a0bf15f3e74d530ed7cad799d4b08a06d363f3b11f
3
  size 988
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d13fec0d58a698a1588dfadf5d00d4fb69b490cb8fc2864fda3b2a2af6f17146
3
  size 988
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:808bb30e45f71065eb0cc43ff5a0467b811e2fe4cef3653bb3a24e25f6d1c7b1
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45c8cf259ef238a9913feac351553475b48cc5d3f6153c06161cc934160ae3d6
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
2
  "best_global_step": 13200,
3
  "best_metric": 0.20100206307102858,
4
  "best_model_checkpoint": "./distil-whisper/checkpoint-13200",
5
- "epoch": 9.407625155888116,
6
  "eval_steps": 400,
7
- "global_step": 13200,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
@@ -1229,6 +1229,43 @@
1229
  "eval_steps_per_second": 0.435,
1230
  "eval_wer": 0.20100206307102858,
1231
  "step": 13200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1232
  }
1233
  ],
1234
  "logging_steps": 100,
@@ -1248,7 +1285,7 @@
1248
  "attributes": {}
1249
  }
1250
  },
1251
- "total_flos": 1.716153403834368e+19,
1252
  "train_batch_size": 8,
1253
  "trial_name": null,
1254
  "trial_params": null
 
2
  "best_global_step": 13200,
3
  "best_metric": 0.20100206307102858,
4
  "best_model_checkpoint": "./distil-whisper/checkpoint-13200",
5
+ "epoch": 9.692677712453234,
6
  "eval_steps": 400,
7
+ "global_step": 13600,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
 
1229
  "eval_steps_per_second": 0.435,
1230
  "eval_wer": 0.20100206307102858,
1231
  "step": 13200
1232
+ },
1233
+ {
1234
+ "epoch": 9.478888295029396,
1235
+ "grad_norm": 19.601333618164062,
1236
+ "learning_rate": 5.506282335550628e-06,
1237
+ "loss": 1.6243,
1238
+ "step": 13300
1239
+ },
1240
+ {
1241
+ "epoch": 9.550151434170676,
1242
+ "grad_norm": 26.340774536132812,
1243
+ "learning_rate": 4.767184035476718e-06,
1244
+ "loss": 1.4496,
1245
+ "step": 13400
1246
+ },
1247
+ {
1248
+ "epoch": 9.621414573311954,
1249
+ "grad_norm": 19.866046905517578,
1250
+ "learning_rate": 4.028085735402809e-06,
1251
+ "loss": 1.5778,
1252
+ "step": 13500
1253
+ },
1254
+ {
1255
+ "epoch": 9.692677712453234,
1256
+ "grad_norm": 16.61330223083496,
1257
+ "learning_rate": 3.288987435328899e-06,
1258
+ "loss": 1.5726,
1259
+ "step": 13600
1260
+ },
1261
+ {
1262
+ "epoch": 9.692677712453234,
1263
+ "eval_loss": 0.09693081676959991,
1264
+ "eval_runtime": 145.8875,
1265
+ "eval_samples_per_second": 3.427,
1266
+ "eval_steps_per_second": 0.432,
1267
+ "eval_wer": 0.20837017388741527,
1268
+ "step": 13600
1269
  }
1270
  ],
1271
  "logging_steps": 100,
 
1285
  "attributes": {}
1286
  }
1287
  },
1288
+ "total_flos": 1.768186261536768e+19,
1289
  "train_batch_size": 8,
1290
  "trial_name": null,
1291
  "trial_params": null