CNR-ILC committed on
Commit
45a9f0f
·
verified ·
1 Parent(s): e86fbca

ILC-CNR/gs-greBERTa

Browse files
README.md CHANGED
@@ -16,13 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  This model is a fine-tuned version of [bowphs/GreBerta](https://huggingface.co/bowphs/GreBerta) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
- - Loss: 2.4879
20
- - Top1 Acc: 0.3563
21
- - Top5 Acc: 0.6438
22
- - Top10 Acc: 0.775
23
- - Top15 Acc: 0.8688
24
- - Top20 Acc: 0.9125
25
- - Top25 Acc: 0.9187
26
 
27
  ## Model description
28
 
@@ -41,7 +35,7 @@ More information needed
41
  ### Training hyperparameters
42
 
43
  The following hyperparameters were used during training:
44
- - learning_rate: 5e-05
45
  - train_batch_size: 16
46
  - eval_batch_size: 8
47
  - seed: 42
@@ -52,23 +46,23 @@ The following hyperparameters were used during training:
52
 
53
  ### Training results
54
 
55
- | Training Loss | Epoch | Step | Validation Loss | Top1 Acc | Top5 Acc | Top10 Acc | Top15 Acc | Top20 Acc | Top25 Acc |
56
- |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:|:---------:|:---------:|:---------:|:---------:|
57
- | 1.2617 | 1.0 | 5710 | 0.9286 | 0.8304 | 0.8947 | 0.9064 | 0.9415 | 0.9532 | 0.9532 |
58
- | 0.938 | 2.0 | 11420 | 0.8058 | 0.8071 | 0.9357 | 0.9643 | 0.9643 | 0.9714 | 0.9786 |
59
- | 0.825 | 3.0 | 17130 | 0.7296 | 0.8375 | 0.95 | 0.9625 | 0.9812 | 0.9875 | 0.9875 |
60
- | 0.765 | 4.0 | 22840 | 0.6903 | 0.775 | 0.8875 | 0.9437 | 0.95 | 0.95 | 0.9563 |
61
- | 0.7226 | 5.0 | 28550 | 0.6579 | 0.8026 | 0.9342 | 0.9539 | 0.9605 | 0.9605 | 0.9605 |
62
- | 0.6998 | 6.0 | 34260 | 0.6540 | 0.8088 | 0.9265 | 0.9412 | 0.9485 | 0.9559 | 0.9632 |
63
- | 1.1697 | 7.0 | 39970 | 2.6136 | 0.4178 | 0.6301 | 0.7397 | 0.8288 | 0.8562 | 0.8699 |
64
- | 2.3523 | 8.0 | 45680 | 2.4223 | 0.3764 | 0.5843 | 0.7191 | 0.7753 | 0.8146 | 0.8483 |
65
- | 2.5091 | 9.0 | 51390 | 2.3962 | 0.4286 | 0.6599 | 0.7619 | 0.8299 | 0.8776 | 0.9116 |
66
- | 2.5581 | 10.0 | 57100 | 2.4889 | 0.34 | 0.6 | 0.7667 | 0.8267 | 0.8533 | 0.9 |
67
 
68
 
69
  ### Framework versions
70
 
71
  - Transformers 4.51.3
72
  - Pytorch 2.7.0+cu126
73
- - Datasets 3.5.1
74
  - Tokenizers 0.21.1
 
16
 
17
  This model is a fine-tuned version of [bowphs/GreBerta](https://huggingface.co/bowphs/GreBerta) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
+ - Loss: 0.5878
 
 
 
 
 
 
20
 
21
  ## Model description
22
 
 
35
  ### Training hyperparameters
36
 
37
  The following hyperparameters were used during training:
38
+ - learning_rate: 4e-05
39
  - train_batch_size: 16
40
  - eval_batch_size: 8
41
  - seed: 42
 
46
 
47
  ### Training results
48
 
49
+ | Training Loss | Epoch | Step | Validation Loss |
50
+ |:-------------:|:-----:|:-----:|:---------------:|
51
+ | 1.2756 | 1.0 | 5710 | 0.9367 |
52
+ | 0.9522 | 2.0 | 11420 | 0.8217 |
53
+ | 0.8493 | 3.0 | 17130 | 0.7453 |
54
+ | 0.781 | 4.0 | 22840 | 0.6875 |
55
+ | 0.7316 | 5.0 | 28550 | 0.6624 |
56
+ | 0.6985 | 6.0 | 34260 | 0.6378 |
57
+ | 0.6736 | 7.0 | 39970 | 0.6155 |
58
+ | 0.6502 | 8.0 | 45680 | 0.6063 |
59
+ | 0.6345 | 9.0 | 51390 | 0.5895 |
60
+ | 0.6232 | 10.0 | 57100 | 0.5866 |
61
 
62
 
63
  ### Framework versions
64
 
65
  - Transformers 4.51.3
66
  - Pytorch 2.7.0+cu126
67
+ - Datasets 3.6.0
68
  - Tokenizers 0.21.1
all_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "epoch": 10.0,
3
- "eval_loss": 2.4879400730133057,
4
- "eval_runtime": 4368.2627,
5
- "eval_samples_per_second": 2.341,
6
- "eval_steps_per_second": 0.293,
7
  "eval_top10_acc": 0.775,
8
  "eval_top15_acc": 0.86875,
9
  "eval_top1_acc": 0.35625,
@@ -12,8 +12,8 @@
12
  "eval_top5_acc": 0.64375,
13
  "step": 57100,
14
  "total_flos": 6.01310491705344e+16,
15
- "train_loss": 1.3801261920561514,
16
- "train_runtime": 49255.6771,
17
- "train_samples_per_second": 18.55,
18
- "train_steps_per_second": 1.159
19
  }
 
1
  {
2
  "epoch": 10.0,
3
+ "eval_loss": 0.5878283381462097,
4
+ "eval_runtime": 36.4992,
5
+ "eval_samples_per_second": 280.116,
6
+ "eval_steps_per_second": 35.014,
7
  "eval_top10_acc": 0.775,
8
  "eval_top15_acc": 0.86875,
9
  "eval_top1_acc": 0.35625,
 
12
  "eval_top5_acc": 0.64375,
13
  "step": 57100,
14
  "total_flos": 6.01310491705344e+16,
15
+ "train_loss": 0.7869737379939251,
16
+ "train_runtime": 10124.3653,
17
+ "train_samples_per_second": 90.247,
18
+ "train_steps_per_second": 5.64
19
  }
eval_results.json CHANGED
@@ -1,13 +1,7 @@
1
  {
2
  "epoch": 10.0,
3
- "eval_loss": 2.4879400730133057,
4
- "eval_runtime": 4368.2627,
5
- "eval_samples_per_second": 2.341,
6
- "eval_steps_per_second": 0.293,
7
- "eval_top10_acc": 0.775,
8
- "eval_top15_acc": 0.86875,
9
- "eval_top1_acc": 0.35625,
10
- "eval_top20_acc": 0.9125,
11
- "eval_top25_acc": 0.91875,
12
- "eval_top5_acc": 0.64375
13
  }
 
1
  {
2
  "epoch": 10.0,
3
+ "eval_loss": 0.5878283381462097,
4
+ "eval_runtime": 36.4992,
5
+ "eval_samples_per_second": 280.116,
6
+ "eval_steps_per_second": 35.014
 
 
 
 
 
 
7
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c0e941c7589f36b7e6d63a780831db82f151251110cc805d2b46fa864d38bc23
3
  size 504150808
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:643e2522fe0e5dca911e3232fa230abe289b64cadad8156ac1d31580e5549d40
3
  size 504150808
train_results.json CHANGED
@@ -2,8 +2,8 @@
2
  "epoch": 10.0,
3
  "step": 57100,
4
  "total_flos": 6.01310491705344e+16,
5
- "train_loss": 1.3801261920561514,
6
- "train_runtime": 49255.6771,
7
- "train_samples_per_second": 18.55,
8
- "train_steps_per_second": 1.159
9
  }
 
2
  "epoch": 10.0,
3
  "step": 57100,
4
  "total_flos": 6.01310491705344e+16,
5
+ "train_loss": 0.7869737379939251,
6
+ "train_runtime": 10124.3653,
7
+ "train_samples_per_second": 90.247,
8
+ "train_steps_per_second": 5.64
9
  }
trainer_state.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "best_global_step": null,
3
- "best_metric": null,
4
  "best_model_checkpoint": null,
5
  "epoch": 10.0,
6
  "eval_steps": 500,
@@ -11,235 +11,169 @@
11
  "log_history": [
12
  {
13
  "epoch": 1.0,
14
- "grad_norm": 4.705949783325195,
15
- "learning_rate": 4.500700525394046e-05,
16
- "loss": 1.2617,
17
  "step": 5710
18
  },
19
  {
20
  "epoch": 1.0,
21
- "eval_loss": 0.928647518157959,
22
- "eval_runtime": 4234.8005,
23
- "eval_samples_per_second": 2.414,
24
- "eval_steps_per_second": 0.302,
25
- "eval_top10_acc": 0.9064327485380117,
26
- "eval_top15_acc": 0.9415204678362573,
27
- "eval_top1_acc": 0.8304093567251462,
28
- "eval_top20_acc": 0.9532163742690059,
29
- "eval_top25_acc": 0.9532163742690059,
30
- "eval_top5_acc": 0.8947368421052632,
31
  "step": 5710
32
  },
33
  {
34
  "epoch": 2.0,
35
- "grad_norm": 5.407649040222168,
36
- "learning_rate": 4.0007005253940456e-05,
37
- "loss": 0.938,
38
  "step": 11420
39
  },
40
  {
41
  "epoch": 2.0,
42
- "eval_loss": 0.8057610392570496,
43
- "eval_runtime": 4258.0614,
44
- "eval_samples_per_second": 2.401,
45
- "eval_steps_per_second": 0.3,
46
- "eval_top10_acc": 0.9642857142857143,
47
- "eval_top15_acc": 0.9642857142857143,
48
- "eval_top1_acc": 0.8071428571428572,
49
- "eval_top20_acc": 0.9714285714285714,
50
- "eval_top25_acc": 0.9785714285714285,
51
- "eval_top5_acc": 0.9357142857142857,
52
  "step": 11420
53
  },
54
  {
55
  "epoch": 3.0,
56
- "grad_norm": 4.782800674438477,
57
- "learning_rate": 3.500875656742557e-05,
58
- "loss": 0.825,
59
  "step": 17130
60
  },
61
  {
62
  "epoch": 3.0,
63
- "eval_loss": 0.7296221256256104,
64
- "eval_runtime": 4280.2,
65
- "eval_samples_per_second": 2.389,
66
- "eval_steps_per_second": 0.299,
67
- "eval_top10_acc": 0.9625,
68
- "eval_top15_acc": 0.98125,
69
- "eval_top1_acc": 0.8375,
70
- "eval_top20_acc": 0.9875,
71
- "eval_top25_acc": 0.9875,
72
- "eval_top5_acc": 0.95,
73
  "step": 17130
74
  },
75
  {
76
  "epoch": 4.0,
77
- "grad_norm": 4.224056243896484,
78
- "learning_rate": 3.0011383537653244e-05,
79
- "loss": 0.765,
80
  "step": 22840
81
  },
82
  {
83
  "epoch": 4.0,
84
- "eval_loss": 0.6902998089790344,
85
- "eval_runtime": 4258.1988,
86
- "eval_samples_per_second": 2.401,
87
- "eval_steps_per_second": 0.3,
88
- "eval_top10_acc": 0.94375,
89
- "eval_top15_acc": 0.95,
90
- "eval_top1_acc": 0.775,
91
- "eval_top20_acc": 0.95,
92
- "eval_top25_acc": 0.95625,
93
- "eval_top5_acc": 0.8875,
94
  "step": 22840
95
  },
96
  {
97
  "epoch": 5.0,
98
- "grad_norm": 4.620619773864746,
99
- "learning_rate": 2.5013134851138352e-05,
100
- "loss": 0.7226,
101
  "step": 28550
102
  },
103
  {
104
  "epoch": 5.0,
105
- "eval_loss": 0.6579228639602661,
106
- "eval_runtime": 4227.7384,
107
- "eval_samples_per_second": 2.418,
108
- "eval_steps_per_second": 0.302,
109
- "eval_top10_acc": 0.9539473684210527,
110
- "eval_top15_acc": 0.9605263157894737,
111
- "eval_top1_acc": 0.8026315789473685,
112
- "eval_top20_acc": 0.9605263157894737,
113
- "eval_top25_acc": 0.9605263157894737,
114
- "eval_top5_acc": 0.9342105263157895,
115
  "step": 28550
116
  },
117
  {
118
  "epoch": 6.0,
119
- "grad_norm": 4.630845546722412,
120
- "learning_rate": 2.001751313485114e-05,
121
- "loss": 0.6998,
122
  "step": 34260
123
  },
124
  {
125
  "epoch": 6.0,
126
- "eval_loss": 0.6539566516876221,
127
- "eval_runtime": 4239.6205,
128
- "eval_samples_per_second": 2.412,
129
- "eval_steps_per_second": 0.301,
130
- "eval_top10_acc": 0.9411764705882353,
131
- "eval_top15_acc": 0.9485294117647058,
132
- "eval_top1_acc": 0.8088235294117647,
133
- "eval_top20_acc": 0.9558823529411765,
134
- "eval_top25_acc": 0.9632352941176471,
135
- "eval_top5_acc": 0.9264705882352942,
136
  "step": 34260
137
  },
138
  {
139
  "epoch": 7.0,
140
- "grad_norm": 5.699009895324707,
141
- "learning_rate": 1.5021891418563924e-05,
142
- "loss": 1.1697,
143
  "step": 39970
144
  },
145
  {
146
  "epoch": 7.0,
147
- "eval_loss": 2.613582134246826,
148
- "eval_runtime": 4560.5825,
149
- "eval_samples_per_second": 2.242,
150
- "eval_steps_per_second": 0.28,
151
- "eval_top10_acc": 0.7397260273972602,
152
- "eval_top15_acc": 0.8287671232876712,
153
- "eval_top1_acc": 0.4178082191780822,
154
- "eval_top20_acc": 0.8561643835616438,
155
- "eval_top25_acc": 0.8698630136986302,
156
- "eval_top5_acc": 0.6301369863013698,
157
  "step": 39970
158
  },
159
  {
160
  "epoch": 8.0,
161
- "grad_norm": 5.335746765136719,
162
- "learning_rate": 1.002276707530648e-05,
163
- "loss": 2.3523,
164
  "step": 45680
165
  },
166
  {
167
  "epoch": 8.0,
168
- "eval_loss": 2.4223251342773438,
169
- "eval_runtime": 4532.3712,
170
- "eval_samples_per_second": 2.256,
171
- "eval_steps_per_second": 0.282,
172
- "eval_top10_acc": 0.7191011235955056,
173
- "eval_top15_acc": 0.7752808988764045,
174
- "eval_top1_acc": 0.37640449438202245,
175
- "eval_top20_acc": 0.8146067415730337,
176
- "eval_top25_acc": 0.848314606741573,
177
- "eval_top5_acc": 0.5842696629213483,
178
  "step": 45680
179
  },
180
  {
181
  "epoch": 9.0,
182
- "grad_norm": 6.699865818023682,
183
- "learning_rate": 5.024518388791594e-06,
184
- "loss": 2.5091,
185
  "step": 51390
186
  },
187
  {
188
  "epoch": 9.0,
189
- "eval_loss": 2.396181344985962,
190
- "eval_runtime": 4441.5711,
191
- "eval_samples_per_second": 2.302,
192
- "eval_steps_per_second": 0.288,
193
- "eval_top10_acc": 0.7619047619047619,
194
- "eval_top15_acc": 0.8299319727891157,
195
- "eval_top1_acc": 0.42857142857142855,
196
- "eval_top20_acc": 0.8775510204081632,
197
- "eval_top25_acc": 0.9115646258503401,
198
- "eval_top5_acc": 0.6598639455782312,
199
  "step": 51390
200
  },
201
  {
202
  "epoch": 10.0,
203
- "grad_norm": 4.715834140777588,
204
- "learning_rate": 2.6269702276707533e-08,
205
- "loss": 2.5581,
206
  "step": 57100
207
  },
208
  {
209
  "epoch": 10.0,
210
- "eval_loss": 2.488941192626953,
211
- "eval_runtime": 4366.5233,
212
- "eval_samples_per_second": 2.341,
213
- "eval_steps_per_second": 0.293,
214
- "eval_top10_acc": 0.7666666666666667,
215
- "eval_top15_acc": 0.8266666666666667,
216
- "eval_top1_acc": 0.34,
217
- "eval_top20_acc": 0.8533333333333334,
218
- "eval_top25_acc": 0.9,
219
- "eval_top5_acc": 0.6,
220
  "step": 57100
221
  },
222
  {
223
  "epoch": 10.0,
224
  "step": 57100,
225
  "total_flos": 6.01310491705344e+16,
226
- "train_loss": 1.3801261920561514,
227
- "train_runtime": 49255.6771,
228
- "train_samples_per_second": 18.55,
229
- "train_steps_per_second": 1.159
230
  },
231
  {
232
  "epoch": 10.0,
233
- "eval_loss": 2.4879400730133057,
234
- "eval_runtime": 4368.2627,
235
- "eval_samples_per_second": 2.341,
236
- "eval_steps_per_second": 0.293,
237
- "eval_top10_acc": 0.775,
238
- "eval_top15_acc": 0.86875,
239
- "eval_top1_acc": 0.35625,
240
- "eval_top20_acc": 0.9125,
241
- "eval_top25_acc": 0.91875,
242
- "eval_top5_acc": 0.64375,
243
  "step": 57100
244
  }
245
  ],
@@ -249,6 +183,15 @@
249
  "num_train_epochs": 10,
250
  "save_steps": 500,
251
  "stateful_callbacks": {
 
 
 
 
 
 
 
 
 
252
  "TrainerControl": {
253
  "args": {
254
  "should_epoch_stop": false,
 
1
  {
2
  "best_global_step": null,
3
+ "best_metric": 0.5865987539291382,
4
  "best_model_checkpoint": null,
5
  "epoch": 10.0,
6
  "eval_steps": 500,
 
11
  "log_history": [
12
  {
13
  "epoch": 1.0,
14
+ "grad_norm": 5.180218696594238,
15
+ "learning_rate": 3.600420315236428e-05,
16
+ "loss": 1.2756,
17
  "step": 5710
18
  },
19
  {
20
  "epoch": 1.0,
21
+ "eval_loss": 0.9367296695709229,
22
+ "eval_runtime": 76.8888,
23
+ "eval_samples_per_second": 132.971,
24
+ "eval_steps_per_second": 16.621,
 
 
 
 
 
 
25
  "step": 5710
26
  },
27
  {
28
  "epoch": 2.0,
29
+ "grad_norm": 5.356524467468262,
30
+ "learning_rate": 3.200630472854641e-05,
31
+ "loss": 0.9522,
32
  "step": 11420
33
  },
34
  {
35
  "epoch": 2.0,
36
+ "eval_loss": 0.8216637969017029,
37
+ "eval_runtime": 73.2993,
38
+ "eval_samples_per_second": 139.483,
39
+ "eval_steps_per_second": 17.435,
 
 
 
 
 
 
40
  "step": 11420
41
  },
42
  {
43
  "epoch": 3.0,
44
+ "grad_norm": 4.990707874298096,
45
+ "learning_rate": 2.800700525394046e-05,
46
+ "loss": 0.8493,
47
  "step": 17130
48
  },
49
  {
50
  "epoch": 3.0,
51
+ "eval_loss": 0.7453346848487854,
52
+ "eval_runtime": 76.8549,
53
+ "eval_samples_per_second": 133.03,
54
+ "eval_steps_per_second": 16.629,
 
 
 
 
 
 
55
  "step": 17130
56
  },
57
  {
58
  "epoch": 4.0,
59
+ "grad_norm": 4.565978527069092,
60
+ "learning_rate": 2.4009106830122595e-05,
61
+ "loss": 0.781,
62
  "step": 22840
63
  },
64
  {
65
  "epoch": 4.0,
66
+ "eval_loss": 0.6874940991401672,
67
+ "eval_runtime": 38.7504,
68
+ "eval_samples_per_second": 263.842,
69
+ "eval_steps_per_second": 32.98,
 
 
 
 
 
 
70
  "step": 22840
71
  },
72
  {
73
  "epoch": 5.0,
74
+ "grad_norm": 4.659147262573242,
75
+ "learning_rate": 2.0010507880910684e-05,
76
+ "loss": 0.7316,
77
  "step": 28550
78
  },
79
  {
80
  "epoch": 5.0,
81
+ "eval_loss": 0.6624111533164978,
82
+ "eval_runtime": 36.5212,
83
+ "eval_samples_per_second": 279.947,
84
+ "eval_steps_per_second": 34.993,
 
 
 
 
 
 
85
  "step": 28550
86
  },
87
  {
88
  "epoch": 6.0,
89
+ "grad_norm": 4.955957412719727,
90
+ "learning_rate": 1.6011908931698776e-05,
91
+ "loss": 0.6985,
92
  "step": 34260
93
  },
94
  {
95
  "epoch": 6.0,
96
+ "eval_loss": 0.6377598643302917,
97
+ "eval_runtime": 35.9349,
98
+ "eval_samples_per_second": 284.514,
99
+ "eval_steps_per_second": 35.564,
 
 
 
 
 
 
100
  "step": 34260
101
  },
102
  {
103
  "epoch": 7.0,
104
+ "grad_norm": 4.549160003662109,
105
+ "learning_rate": 1.2012609457092821e-05,
106
+ "loss": 0.6736,
107
  "step": 39970
108
  },
109
  {
110
  "epoch": 7.0,
111
+ "eval_loss": 0.6155329346656799,
112
+ "eval_runtime": 37.8645,
113
+ "eval_samples_per_second": 270.015,
114
+ "eval_steps_per_second": 33.752,
 
 
 
 
 
 
115
  "step": 39970
116
  },
117
  {
118
  "epoch": 8.0,
119
+ "grad_norm": 4.006523609161377,
120
+ "learning_rate": 8.01260945709282e-06,
121
+ "loss": 0.6502,
122
  "step": 45680
123
  },
124
  {
125
  "epoch": 8.0,
126
+ "eval_loss": 0.6062974333763123,
127
+ "eval_runtime": 36.5102,
128
+ "eval_samples_per_second": 280.031,
129
+ "eval_steps_per_second": 35.004,
 
 
 
 
 
 
130
  "step": 45680
131
  },
132
  {
133
  "epoch": 9.0,
134
+ "grad_norm": 3.7265231609344482,
135
+ "learning_rate": 4.013309982486865e-06,
136
+ "loss": 0.6345,
137
  "step": 51390
138
  },
139
  {
140
  "epoch": 9.0,
141
+ "eval_loss": 0.5894550681114197,
142
+ "eval_runtime": 43.2376,
143
+ "eval_samples_per_second": 236.461,
144
+ "eval_steps_per_second": 29.558,
 
 
 
 
 
 
145
  "step": 51390
146
  },
147
  {
148
  "epoch": 10.0,
149
+ "grad_norm": 3.897273302078247,
150
+ "learning_rate": 1.611208406304729e-08,
151
+ "loss": 0.6232,
152
  "step": 57100
153
  },
154
  {
155
  "epoch": 10.0,
156
+ "eval_loss": 0.5865987539291382,
157
+ "eval_runtime": 36.5729,
158
+ "eval_samples_per_second": 279.551,
159
+ "eval_steps_per_second": 34.944,
 
 
 
 
 
 
160
  "step": 57100
161
  },
162
  {
163
  "epoch": 10.0,
164
  "step": 57100,
165
  "total_flos": 6.01310491705344e+16,
166
+ "train_loss": 0.7869737379939251,
167
+ "train_runtime": 10124.3653,
168
+ "train_samples_per_second": 90.247,
169
+ "train_steps_per_second": 5.64
170
  },
171
  {
172
  "epoch": 10.0,
173
+ "eval_loss": 0.5878283381462097,
174
+ "eval_runtime": 36.4992,
175
+ "eval_samples_per_second": 280.116,
176
+ "eval_steps_per_second": 35.014,
 
 
 
 
 
 
177
  "step": 57100
178
  }
179
  ],
 
183
  "num_train_epochs": 10,
184
  "save_steps": 500,
185
  "stateful_callbacks": {
186
+ "EarlyStoppingCallback": {
187
+ "args": {
188
+ "early_stopping_patience": 2,
189
+ "early_stopping_threshold": 0.0
190
+ },
191
+ "attributes": {
192
+ "early_stopping_patience_counter": 0
193
+ }
194
+ },
195
  "TrainerControl": {
196
  "args": {
197
  "should_epoch_stop": false,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d86f31b8645edb5a8bc695e48ee26e49fb8c60618206647233a2cba8872a191c
3
  size 5649
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f8e8baf6eeb9717a999831a644b81e1618482d42a4c4d6f0efb269f9d424652
3
  size 5649