Priyanship committed on
Commit
083f0df
·
verified ·
1 Parent(s): b3bec42

End of training

Browse files
Files changed (6) hide show
  1. README.md +13 -22
  2. all_results.json +8 -14
  3. config.json +1 -1
  4. eval_results.json +8 -8
  5. model.safetensors +1 -1
  6. training_args.bin +1 -1
README.md CHANGED
@@ -1,8 +1,7 @@
1
  ---
 
2
  tags:
3
  - generated_from_trainer
4
- metrics:
5
- - wer
6
  model-index:
7
  - name: output
8
  results: []
@@ -11,14 +10,18 @@ model-index:
11
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
12
  should probably proofread and complete it, then remove this comment. -->
13
 
14
- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/priyanshipal/huggingface/runs/hvo6b5jz)
15
  # output
16
 
17
  This model was trained from scratch on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
- - Loss: 1.9947
20
- - Cer: 0.4133
21
- - Wer: 0.6195
 
 
 
 
 
22
 
23
  ## Model description
24
 
@@ -45,25 +48,13 @@ The following hyperparameters were used during training:
45
  - total_train_batch_size: 32
46
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
47
  - lr_scheduler_type: linear
48
- - lr_scheduler_warmup_steps: 2000
49
- - training_steps: 6000
50
  - mixed_precision_training: Native AMP
51
 
52
- ### Training results
53
-
54
- | Training Loss | Epoch | Step | Validation Loss | Cer | Wer |
55
- |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|
56
- | 1.5851 | 1.6 | 1000 | 1.9947 | 0.4133 | 0.6195 |
57
- | 1.8352 | 3.2 | 2000 | 2.1491 | 0.4724 | 0.7895 |
58
- | 2.3755 | 4.8 | 3000 | 2.3793 | 0.4433 | 0.7270 |
59
- | 3.3134 | 6.4 | 4000 | 3.3025 | 0.5204 | 0.8033 |
60
- | 3.4098 | 8.0 | 5000 | 3.2885 | 0.5196 | 0.8050 |
61
- | 3.1155 | 9.6 | 6000 | 3.2885 | 0.5196 | 0.8050 |
62
-
63
-
64
  ### Framework versions
65
 
66
- - Transformers 4.43.1
67
  - Pytorch 2.4.0
68
  - Datasets 2.20.0
69
- - Tokenizers 0.19.1
 
1
  ---
2
+ library_name: transformers
3
  tags:
4
  - generated_from_trainer
 
 
5
  model-index:
6
  - name: output
7
  results: []
 
10
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
11
  should probably proofread and complete it, then remove this comment. -->
12
 
 
13
  # output
14
 
15
  This model was trained from scratch on an unknown dataset.
16
  It achieves the following results on the evaluation set:
17
+ - eval_loss: 2.3722
18
+ - eval_model_preparation_time: 0.0044
19
+ - eval_cer: 0.3572
20
+ - eval_wer: 0.5040
21
+ - eval_runtime: 41.1882
22
+ - eval_samples_per_second: 13.887
23
+ - eval_steps_per_second: 0.874
24
+ - step: 0
25
 
26
  ## Model description
27
 
 
48
  - total_train_batch_size: 32
49
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
50
  - lr_scheduler_type: linear
51
+ - lr_scheduler_warmup_steps: 500
52
+ - training_steps: 1000
53
  - mixed_precision_training: Native AMP
54
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  ### Framework versions
56
 
57
+ - Transformers 4.45.2
58
  - Pytorch 2.4.0
59
  - Datasets 2.20.0
60
+ - Tokenizers 0.20.1
all_results.json CHANGED
@@ -1,16 +1,10 @@
1
  {
2
- "epoch": 9.6,
3
- "eval_cer": 0.4133216406903974,
4
- "eval_loss": 1.9947007894515991,
5
- "eval_runtime": 158.2803,
6
- "eval_samples": 3136,
7
- "eval_samples_per_second": 19.813,
8
- "eval_steps_per_second": 1.238,
9
- "eval_wer": 0.6194798466480157,
10
- "total_flos": 3.700768773245485e+19,
11
- "train_loss": 2.825458660195271,
12
- "train_runtime": 12931.7591,
13
- "train_samples": 20000,
14
- "train_samples_per_second": 14.847,
15
- "train_steps_per_second": 0.464
16
  }
 
1
  {
2
+ "eval_cer": 0.35724199025542597,
3
+ "eval_loss": 2.372227191925049,
4
+ "eval_model_preparation_time": 0.0044,
5
+ "eval_runtime": 41.1882,
6
+ "eval_samples": 572,
7
+ "eval_samples_per_second": 13.887,
8
+ "eval_steps_per_second": 0.874,
9
+ "eval_wer": 0.5040222319730876
 
 
 
 
 
 
10
  }
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "/m/triton/scratch/elec/puhe/p/palp3/MUCS/indicwav2vec-hindi",
3
  "activation_dropout": 0.0,
4
  "adapter_attn_dim": null,
5
  "adapter_kernel_size": 3,
 
1
  {
2
+ "_name_or_path": "/scratch/elec/puhe/p/palp3/MUCS/indicwav2vec_outputs/pd_warmup500_rerun_latest/output",
3
  "activation_dropout": 0.0,
4
  "adapter_attn_dim": null,
5
  "adapter_kernel_size": 3,
eval_results.json CHANGED
@@ -1,10 +1,10 @@
1
  {
2
- "epoch": 9.6,
3
- "eval_cer": 0.4133216406903974,
4
- "eval_loss": 1.9947007894515991,
5
- "eval_runtime": 158.2803,
6
- "eval_samples": 3136,
7
- "eval_samples_per_second": 19.813,
8
- "eval_steps_per_second": 1.238,
9
- "eval_wer": 0.6194798466480157
10
  }
 
1
  {
2
+ "eval_cer": 0.35724199025542597,
3
+ "eval_loss": 2.372227191925049,
4
+ "eval_model_preparation_time": 0.0044,
5
+ "eval_runtime": 41.1882,
6
+ "eval_samples": 572,
7
+ "eval_samples_per_second": 13.887,
8
+ "eval_steps_per_second": 0.874,
9
+ "eval_wer": 0.5040222319730876
10
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:970b2a5b5fed4e18f8e31dabc4818871b3923a71388caa434be80e585cebee68
3
  size 1262426580
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81ae76905e24f861855c984d31eca140c59bb0b9d023d6e279dbc94e7647f56e
3
  size 1262426580
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:88d5758118fce348aebadc5cf828d5353f39c3dd822ba51c3d9b83fec216ffa6
3
  size 5496
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4f894ef290b2994d34daec3244b6a38f4886d79722ebb3a09e619200e3ab36a
3
  size 5496