Hartunka committed on
Commit
0e56feb
·
verified ·
1 Parent(s): cf7f703

End of training

Browse files
README.md CHANGED
@@ -1,12 +1,27 @@
1
  ---
 
 
2
  base_model: Hartunka/tiny_bert_km_100_v1
3
  tags:
4
  - generated_from_trainer
 
 
5
  metrics:
6
  - accuracy
7
  model-index:
8
  - name: tiny_bert_km_100_v1_rte
9
- results: []
 
 
 
 
 
 
 
 
 
 
 
10
  ---
11
 
12
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -14,10 +29,10 @@ should probably proofread and complete it, then remove this comment. -->
14
 
15
  # tiny_bert_km_100_v1_rte
16
 
17
- This model is a fine-tuned version of [Hartunka/tiny_bert_km_100_v1](https://huggingface.co/Hartunka/tiny_bert_km_100_v1) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
- - Loss: 0.8417
20
- - Accuracy: 0.5307
21
 
22
  ## Model description
23
 
 
1
  ---
2
+ language:
3
+ - en
4
  base_model: Hartunka/tiny_bert_km_100_v1
5
  tags:
6
  - generated_from_trainer
7
+ datasets:
8
+ - glue
9
  metrics:
10
  - accuracy
11
  model-index:
12
  - name: tiny_bert_km_100_v1_rte
13
+ results:
14
+ - task:
15
+ name: Text Classification
16
+ type: text-classification
17
+ dataset:
18
+ name: GLUE RTE
19
+ type: glue
20
+ args: rte
21
+ metrics:
22
+ - name: Accuracy
23
+ type: accuracy
24
+ value: 0.5270758122743683
25
  ---
26
 
27
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
29
 
30
  # tiny_bert_km_100_v1_rte
31
 
32
+ This model is a fine-tuned version of [Hartunka/tiny_bert_km_100_v1](https://huggingface.co/Hartunka/tiny_bert_km_100_v1) on the GLUE RTE dataset.
33
  It achieves the following results on the evaluation set:
34
+ - Loss: 0.6949
35
+ - Accuracy: 0.5271
36
 
37
  ## Model description
38
 
all_results.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 8.0,
3
+ "eval_accuracy": 0.5270758122743683,
4
+ "eval_loss": 0.694903552532196,
5
+ "eval_runtime": 0.141,
6
+ "eval_samples": 277,
7
+ "eval_samples_per_second": 1965.206,
8
+ "eval_steps_per_second": 14.189,
9
+ "total_flos": 522373675991040.0,
10
+ "train_loss": 0.6144436895847321,
11
+ "train_runtime": 21.1487,
12
+ "train_samples": 2490,
13
+ "train_samples_per_second": 5886.889,
14
+ "train_steps_per_second": 23.642
15
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 8.0,
3
+ "eval_accuracy": 0.5270758122743683,
4
+ "eval_loss": 0.694903552532196,
5
+ "eval_runtime": 0.141,
6
+ "eval_samples": 277,
7
+ "eval_samples_per_second": 1965.206,
8
+ "eval_steps_per_second": 14.189
9
+ }
logs/events.out.tfevents.1744820845.s_004_m ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d69d49a46b3b2ec6f3fc90de98623c9a4aa377a8b898877d9f31bae0f203d86
3
+ size 249
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 8.0,
3
+ "total_flos": 522373675991040.0,
4
+ "train_loss": 0.6144436895847321,
5
+ "train_runtime": 21.1487,
6
+ "train_samples": 2490,
7
+ "train_samples_per_second": 5886.889,
8
+ "train_steps_per_second": 23.642
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.694903552532196,
3
+ "best_model_checkpoint": "tiny_bert_km_100_v1_rte/checkpoint-30",
4
+ "epoch": 8.0,
5
+ "eval_steps": 500,
6
+ "global_step": 80,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 1.0,
13
+ "grad_norm": 0.6791782975196838,
14
+ "learning_rate": 4.9e-05,
15
+ "loss": 0.699,
16
+ "step": 10
17
+ },
18
+ {
19
+ "epoch": 1.0,
20
+ "eval_accuracy": 0.5270758122743683,
21
+ "eval_loss": 0.6959471106529236,
22
+ "eval_runtime": 0.1358,
23
+ "eval_samples_per_second": 2040.346,
24
+ "eval_steps_per_second": 14.732,
25
+ "step": 10
26
+ },
27
+ {
28
+ "epoch": 2.0,
29
+ "grad_norm": 0.7037239074707031,
30
+ "learning_rate": 4.8e-05,
31
+ "loss": 0.6852,
32
+ "step": 20
33
+ },
34
+ {
35
+ "epoch": 2.0,
36
+ "eval_accuracy": 0.4981949458483754,
37
+ "eval_loss": 0.7013058662414551,
38
+ "eval_runtime": 0.1226,
39
+ "eval_samples_per_second": 2259.809,
40
+ "eval_steps_per_second": 16.316,
41
+ "step": 20
42
+ },
43
+ {
44
+ "epoch": 3.0,
45
+ "grad_norm": 0.6154415011405945,
46
+ "learning_rate": 4.7e-05,
47
+ "loss": 0.6713,
48
+ "step": 30
49
+ },
50
+ {
51
+ "epoch": 3.0,
52
+ "eval_accuracy": 0.5270758122743683,
53
+ "eval_loss": 0.694903552532196,
54
+ "eval_runtime": 0.1148,
55
+ "eval_samples_per_second": 2412.236,
56
+ "eval_steps_per_second": 17.417,
57
+ "step": 30
58
+ },
59
+ {
60
+ "epoch": 4.0,
61
+ "grad_norm": 0.636819064617157,
62
+ "learning_rate": 4.600000000000001e-05,
63
+ "loss": 0.6531,
64
+ "step": 40
65
+ },
66
+ {
67
+ "epoch": 4.0,
68
+ "eval_accuracy": 0.5451263537906137,
69
+ "eval_loss": 0.7031038403511047,
70
+ "eval_runtime": 0.1179,
71
+ "eval_samples_per_second": 2348.643,
72
+ "eval_steps_per_second": 16.958,
73
+ "step": 40
74
+ },
75
+ {
76
+ "epoch": 5.0,
77
+ "grad_norm": 0.8013365268707275,
78
+ "learning_rate": 4.5e-05,
79
+ "loss": 0.6265,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 5.0,
84
+ "eval_accuracy": 0.5234657039711191,
85
+ "eval_loss": 0.708032488822937,
86
+ "eval_runtime": 0.1174,
87
+ "eval_samples_per_second": 2359.336,
88
+ "eval_steps_per_second": 17.035,
89
+ "step": 50
90
+ },
91
+ {
92
+ "epoch": 6.0,
93
+ "grad_norm": 0.9887241125106812,
94
+ "learning_rate": 4.4000000000000006e-05,
95
+ "loss": 0.5887,
96
+ "step": 60
97
+ },
98
+ {
99
+ "epoch": 6.0,
100
+ "eval_accuracy": 0.516245487364621,
101
+ "eval_loss": 0.7479658126831055,
102
+ "eval_runtime": 0.1163,
103
+ "eval_samples_per_second": 2380.993,
104
+ "eval_steps_per_second": 17.191,
105
+ "step": 60
106
+ },
107
+ {
108
+ "epoch": 7.0,
109
+ "grad_norm": 1.28850519657135,
110
+ "learning_rate": 4.3e-05,
111
+ "loss": 0.5347,
112
+ "step": 70
113
+ },
114
+ {
115
+ "epoch": 7.0,
116
+ "eval_accuracy": 0.5342960288808665,
117
+ "eval_loss": 0.7832542657852173,
118
+ "eval_runtime": 0.125,
119
+ "eval_samples_per_second": 2216.571,
120
+ "eval_steps_per_second": 16.004,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 8.0,
125
+ "grad_norm": 1.8739584684371948,
126
+ "learning_rate": 4.2e-05,
127
+ "loss": 0.457,
128
+ "step": 80
129
+ },
130
+ {
131
+ "epoch": 8.0,
132
+ "eval_accuracy": 0.5306859205776173,
133
+ "eval_loss": 0.8416646718978882,
134
+ "eval_runtime": 0.1169,
135
+ "eval_samples_per_second": 2369.547,
136
+ "eval_steps_per_second": 17.109,
137
+ "step": 80
138
+ },
139
+ {
140
+ "epoch": 8.0,
141
+ "step": 80,
142
+ "total_flos": 522373675991040.0,
143
+ "train_loss": 0.6144436895847321,
144
+ "train_runtime": 21.1487,
145
+ "train_samples_per_second": 5886.889,
146
+ "train_steps_per_second": 23.642
147
+ }
148
+ ],
149
+ "logging_steps": 1,
150
+ "max_steps": 500,
151
+ "num_input_tokens_seen": 0,
152
+ "num_train_epochs": 50,
153
+ "save_steps": 500,
154
+ "total_flos": 522373675991040.0,
155
+ "train_batch_size": 256,
156
+ "trial_name": null,
157
+ "trial_params": null
158
+ }