corranm committed on
Commit
92fb907
·
verified ·
1 Parent(s): 3d91d4b

End of training

Browse files
README.md ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ tags:
4
+ - generated_from_trainer
5
+ metrics:
6
+ - accuracy
7
+ model-index:
8
+ - name: vit-tiny-patch16-224
9
+ results: []
10
+ ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # vit-tiny-patch16-224
16
+
17
+ This model is a fine-tuned version of [WinKawaks/vit-tiny-patch16-224](https://huggingface.co/WinKawaks/vit-tiny-patch16-224) on an unknown dataset.
18
+ It achieves the following results on the evaluation set:
19
+ - Loss: 1.6141
20
+ - F1 Macro: 0.4385
21
+ - F1 Micro: 0.5303
22
+ - F1 Weighted: 0.4856
23
+ - Precision Macro: 0.5225
24
+ - Precision Micro: 0.5303
25
+ - Precision Weighted: 0.5788
26
+ - Recall Macro: 0.4858
27
+ - Recall Micro: 0.5303
28
+ - Recall Weighted: 0.5303
29
+ - Accuracy: 0.5303
30
+
31
+ ## Model description
32
+
33
+ More information needed
34
+
35
+ ## Intended uses & limitations
36
+
37
+ More information needed
38
+
39
+ ## Training and evaluation data
40
+
41
+ More information needed
42
+
43
+ ## Training procedure
44
+
45
+ ### Training hyperparameters
46
+
47
+ The following hyperparameters were used during training:
48
+ - learning_rate: 0.0001
49
+ - train_batch_size: 8
50
+ - eval_batch_size: 8
51
+ - seed: 42
52
+ - gradient_accumulation_steps: 2
53
+ - total_train_batch_size: 16
54
+ - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
55
+ - lr_scheduler_type: linear
56
+ - lr_scheduler_warmup_ratio: 0.1
57
+ - num_epochs: 25
58
+
59
+ ### Training results
60
+
61
+ | Training Loss | Epoch | Step | Validation Loss | F1 Macro | F1 Micro | F1 Weighted | Precision Macro | Precision Micro | Precision Weighted | Recall Macro | Recall Micro | Recall Weighted | Accuracy |
62
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|:-----------:|:---------------:|:---------------:|:------------------:|:------------:|:------------:|:---------------:|:--------:|
63
+ | 1.9719 | 1.0 | 29 | 1.9209 | 0.0949 | 0.2197 | 0.1155 | 0.0891 | 0.2197 | 0.1051 | 0.1722 | 0.2197 | 0.2197 | 0.2197 |
64
+ | 1.8717 | 2.0 | 58 | 2.0378 | 0.0953 | 0.1970 | 0.1069 | 0.1996 | 0.1970 | 0.2660 | 0.1794 | 0.1970 | 0.1970 | 0.1970 |
65
+ | 1.9326 | 3.0 | 87 | 1.7680 | 0.2290 | 0.3939 | 0.2939 | 0.2151 | 0.3939 | 0.2682 | 0.3004 | 0.3939 | 0.3939 | 0.3939 |
66
+ | 1.2873 | 4.0 | 116 | 1.5892 | 0.3502 | 0.4470 | 0.4082 | 0.4831 | 0.4470 | 0.5140 | 0.3646 | 0.4470 | 0.4470 | 0.4470 |
67
+ | 1.3997 | 5.0 | 145 | 1.4773 | 0.3481 | 0.5 | 0.4245 | 0.3463 | 0.5 | 0.4119 | 0.4052 | 0.5 | 0.5 | 0.5 |
68
+ | 1.7041 | 6.0 | 174 | 1.4406 | 0.4266 | 0.5379 | 0.5005 | 0.5011 | 0.5379 | 0.5628 | 0.4529 | 0.5379 | 0.5379 | 0.5379 |
69
+ | 1.1863 | 7.0 | 203 | 1.3680 | 0.4759 | 0.5682 | 0.5400 | 0.5559 | 0.5682 | 0.6032 | 0.4831 | 0.5682 | 0.5682 | 0.5682 |
70
+ | 0.9817 | 8.0 | 232 | 1.3515 | 0.4399 | 0.5227 | 0.4969 | 0.4445 | 0.5227 | 0.5088 | 0.4722 | 0.5227 | 0.5227 | 0.5227 |
71
+ | 0.617 | 9.0 | 261 | 1.3867 | 0.4895 | 0.5909 | 0.5555 | 0.5136 | 0.5909 | 0.5776 | 0.5183 | 0.5909 | 0.5909 | 0.5909 |
72
+ | 1.0365 | 10.0 | 290 | 1.4607 | 0.4313 | 0.5379 | 0.4961 | 0.4371 | 0.5379 | 0.4997 | 0.4674 | 0.5379 | 0.5379 | 0.5379 |
73
+ | 0.6815 | 11.0 | 319 | 1.3133 | 0.4962 | 0.5909 | 0.5664 | 0.5087 | 0.5909 | 0.5742 | 0.5133 | 0.5909 | 0.5909 | 0.5909 |
74
+ | 0.4153 | 12.0 | 348 | 1.3528 | 0.5082 | 0.5909 | 0.5735 | 0.5185 | 0.5909 | 0.5820 | 0.5202 | 0.5909 | 0.5909 | 0.5909 |
75
+ | 0.3396 | 13.0 | 377 | 1.3856 | 0.5372 | 0.5909 | 0.5830 | 0.5623 | 0.5909 | 0.6018 | 0.5387 | 0.5909 | 0.5909 | 0.5909 |
76
+ | 0.5415 | 14.0 | 406 | 1.4252 | 0.5132 | 0.5909 | 0.5795 | 0.5223 | 0.5909 | 0.5893 | 0.5255 | 0.5909 | 0.5909 | 0.5909 |
77
+ | 0.4421 | 15.0 | 435 | 1.4081 | 0.5574 | 0.6136 | 0.6086 | 0.5753 | 0.6136 | 0.6149 | 0.5532 | 0.6136 | 0.6136 | 0.6136 |
78
+ | 0.2893 | 16.0 | 464 | 1.5285 | 0.5127 | 0.5985 | 0.5833 | 0.5059 | 0.5985 | 0.5752 | 0.5253 | 0.5985 | 0.5985 | 0.5985 |
79
+ | 0.2403 | 17.0 | 493 | 1.4820 | 0.5395 | 0.6288 | 0.6065 | 0.5808 | 0.6288 | 0.6380 | 0.5460 | 0.6288 | 0.6288 | 0.6288 |
80
+ | 0.1087 | 18.0 | 522 | 1.3999 | 0.5320 | 0.6061 | 0.6009 | 0.5612 | 0.6061 | 0.6211 | 0.5261 | 0.6061 | 0.6061 | 0.6061 |
81
+ | 0.2619 | 19.0 | 551 | 1.4408 | 0.5618 | 0.6136 | 0.6037 | 0.6154 | 0.6136 | 0.6225 | 0.5501 | 0.6136 | 0.6136 | 0.6136 |
82
+ | 0.1154 | 20.0 | 580 | 1.4516 | 0.5402 | 0.6288 | 0.6090 | 0.5538 | 0.6288 | 0.6145 | 0.5492 | 0.6288 | 0.6288 | 0.6288 |
83
+ | 0.1367 | 21.0 | 609 | 1.5306 | 0.5254 | 0.6136 | 0.5942 | 0.5321 | 0.6136 | 0.5923 | 0.5340 | 0.6136 | 0.6136 | 0.6136 |
84
+ | 0.0839 | 22.0 | 638 | 1.6397 | 0.5154 | 0.5833 | 0.5756 | 0.5274 | 0.5833 | 0.5895 | 0.5252 | 0.5833 | 0.5833 | 0.5833 |
85
+ | 0.1818 | 23.0 | 667 | 1.6416 | 0.5656 | 0.6515 | 0.6359 | 0.5848 | 0.6515 | 0.6456 | 0.5696 | 0.6515 | 0.6515 | 0.6515 |
86
+ | 0.0781 | 24.0 | 696 | 1.6026 | 0.5393 | 0.6212 | 0.6079 | 0.5524 | 0.6212 | 0.6118 | 0.5412 | 0.6212 | 0.6212 | 0.6212 |
87
+ | 0.0792 | 25.0 | 725 | 1.5997 | 0.5494 | 0.6288 | 0.6180 | 0.5716 | 0.6288 | 0.6297 | 0.5480 | 0.6288 | 0.6288 | 0.6288 |
88
+
89
+
90
+ ### Framework versions
91
+
92
+ - Transformers 4.48.2
93
+ - Pytorch 2.6.0+cu124
94
+ - Datasets 3.2.0
95
+ - Tokenizers 0.21.0
all_results.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 25.0,
3
+ "eval_accuracy": 0.5303030303030303,
4
+ "eval_f1_macro": 0.438453500522466,
5
+ "eval_f1_micro": 0.5303030303030303,
6
+ "eval_f1_weighted": 0.4855651604867906,
7
+ "eval_loss": 1.6140881776809692,
8
+ "eval_precision_macro": 0.5224921277552858,
9
+ "eval_precision_micro": 0.5303030303030303,
10
+ "eval_precision_weighted": 0.5787633419212366,
11
+ "eval_recall_macro": 0.4858276643990929,
12
+ "eval_recall_micro": 0.5303030303030303,
13
+ "eval_recall_weighted": 0.5303030303030303,
14
+ "eval_runtime": 1.1271,
15
+ "eval_samples_per_second": 58.556,
16
+ "eval_steps_per_second": 7.985,
17
+ "total_flos": 5.76425379898368e+16,
18
+ "train_loss": 0.7207291752639515,
19
+ "train_runtime": 641.8354,
20
+ "train_samples_per_second": 17.995,
21
+ "train_steps_per_second": 1.13
22
+ }
config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "WinKawaks/vit-tiny-patch16-224",
3
+ "architectures": [
4
+ "ViTForImageClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "encoder_stride": 16,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.0,
10
+ "hidden_size": 192,
11
+ "id2label": {
12
+ "0": "-",
13
+ "1": "0",
14
+ "2": "1",
15
+ "3": "2",
16
+ "4": "3",
17
+ "5": "4",
18
+ "6": "5"
19
+ },
20
+ "image_size": 224,
21
+ "initializer_range": 0.02,
22
+ "intermediate_size": 768,
23
+ "label2id": {
24
+ "-": "0",
25
+ "0": "1",
26
+ "1": "2",
27
+ "2": "3",
28
+ "3": "4",
29
+ "4": "5",
30
+ "5": "6"
31
+ },
32
+ "layer_norm_eps": 1e-12,
33
+ "model_type": "vit",
34
+ "num_attention_heads": 3,
35
+ "num_channels": 3,
36
+ "num_hidden_layers": 12,
37
+ "patch_size": 16,
38
+ "problem_type": "single_label_classification",
39
+ "qkv_bias": true,
40
+ "torch_dtype": "float32",
41
+ "transformers_version": "4.48.2"
42
+ }
eval_results.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 25.0,
3
+ "eval_accuracy": 0.5303030303030303,
4
+ "eval_f1_macro": 0.438453500522466,
5
+ "eval_f1_micro": 0.5303030303030303,
6
+ "eval_f1_weighted": 0.4855651604867906,
7
+ "eval_loss": 1.6140881776809692,
8
+ "eval_precision_macro": 0.5224921277552858,
9
+ "eval_precision_micro": 0.5303030303030303,
10
+ "eval_precision_weighted": 0.5787633419212366,
11
+ "eval_recall_macro": 0.4858276643990929,
12
+ "eval_recall_micro": 0.5303030303030303,
13
+ "eval_recall_weighted": 0.5303030303030303,
14
+ "eval_runtime": 1.1271,
15
+ "eval_samples_per_second": 58.556,
16
+ "eval_steps_per_second": 7.985
17
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:555b806e9d76cce985357a73fb5e5407ec87800b3ccbcce89c15dd4e44a931b5
3
+ size 22125780
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_convert_rgb": null,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.5,
8
+ 0.5,
9
+ 0.5
10
+ ],
11
+ "image_processor_type": "ViTImageProcessorFast",
12
+ "image_std": [
13
+ 0.5,
14
+ 0.5,
15
+ 0.5
16
+ ],
17
+ "resample": 2,
18
+ "rescale_factor": 0.00392156862745098,
19
+ "size": {
20
+ "height": 224,
21
+ "width": 224
22
+ }
23
+ }
runs/Feb02_21-43-28_modal/events.out.tfevents.1738532609.modal.2.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:400897567d8bde749c698ec42b5c6f74c7791343045c3933f6117613b0c1ea89
3
+ size 102473
runs/Feb02_21-43-28_modal/events.out.tfevents.1738532609.modal.2.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25404312a81211f46a30c7661a7ef0c6d8aa8014514086928f819e67a6344d34
3
+ size 102473
runs/Feb02_21-43-28_modal/events.out.tfevents.1738533252.modal.2.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:926331613a0535638d846792939e8d18635605c564bae5d31325a6d11800779a
3
+ size 921
runs/Feb02_21-43-28_modal/events.out.tfevents.1738533252.modal.2.3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9323da24b924abaaf5715ea61c5e8c197ac9251452887da5abd21a5944049c0
3
+ size 921
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 25.0,
3
+ "total_flos": 5.76425379898368e+16,
4
+ "train_loss": 0.7207291752639515,
5
+ "train_runtime": 641.8354,
6
+ "train_samples_per_second": 17.995,
7
+ "train_steps_per_second": 1.13
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,3026 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 1.3132938146591187,
3
+ "best_model_checkpoint": "WinKawaks/vit-tiny-patch16-224/checkpoint-319",
4
+ "epoch": 25.0,
5
+ "eval_steps": 500,
6
+ "global_step": 725,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.06896551724137931,
13
+ "grad_norm": 50.05537033081055,
14
+ "learning_rate": 2.7397260273972604e-06,
15
+ "loss": 2.392,
16
+ "step": 2
17
+ },
18
+ {
19
+ "epoch": 0.13793103448275862,
20
+ "grad_norm": 21.303316116333008,
21
+ "learning_rate": 5.479452054794521e-06,
22
+ "loss": 1.993,
23
+ "step": 4
24
+ },
25
+ {
26
+ "epoch": 0.20689655172413793,
27
+ "grad_norm": 29.138675689697266,
28
+ "learning_rate": 8.21917808219178e-06,
29
+ "loss": 1.9911,
30
+ "step": 6
31
+ },
32
+ {
33
+ "epoch": 0.27586206896551724,
34
+ "grad_norm": 24.99004364013672,
35
+ "learning_rate": 1.0958904109589042e-05,
36
+ "loss": 2.1828,
37
+ "step": 8
38
+ },
39
+ {
40
+ "epoch": 0.3448275862068966,
41
+ "grad_norm": 29.86865234375,
42
+ "learning_rate": 1.3698630136986302e-05,
43
+ "loss": 1.8818,
44
+ "step": 10
45
+ },
46
+ {
47
+ "epoch": 0.41379310344827586,
48
+ "grad_norm": 33.12383270263672,
49
+ "learning_rate": 1.643835616438356e-05,
50
+ "loss": 2.0859,
51
+ "step": 12
52
+ },
53
+ {
54
+ "epoch": 0.4827586206896552,
55
+ "grad_norm": 25.435964584350586,
56
+ "learning_rate": 1.9178082191780822e-05,
57
+ "loss": 2.0565,
58
+ "step": 14
59
+ },
60
+ {
61
+ "epoch": 0.5517241379310345,
62
+ "grad_norm": 21.697423934936523,
63
+ "learning_rate": 2.1917808219178083e-05,
64
+ "loss": 1.9561,
65
+ "step": 16
66
+ },
67
+ {
68
+ "epoch": 0.6206896551724138,
69
+ "grad_norm": 21.292062759399414,
70
+ "learning_rate": 2.4657534246575342e-05,
71
+ "loss": 1.8509,
72
+ "step": 18
73
+ },
74
+ {
75
+ "epoch": 0.6896551724137931,
76
+ "grad_norm": 25.63444709777832,
77
+ "learning_rate": 2.7397260273972603e-05,
78
+ "loss": 1.9259,
79
+ "step": 20
80
+ },
81
+ {
82
+ "epoch": 0.7586206896551724,
83
+ "grad_norm": 12.96942138671875,
84
+ "learning_rate": 3.0136986301369862e-05,
85
+ "loss": 1.8952,
86
+ "step": 22
87
+ },
88
+ {
89
+ "epoch": 0.8275862068965517,
90
+ "grad_norm": 13.86652660369873,
91
+ "learning_rate": 3.287671232876712e-05,
92
+ "loss": 1.9227,
93
+ "step": 24
94
+ },
95
+ {
96
+ "epoch": 0.896551724137931,
97
+ "grad_norm": 28.36823081970215,
98
+ "learning_rate": 3.561643835616438e-05,
99
+ "loss": 1.8958,
100
+ "step": 26
101
+ },
102
+ {
103
+ "epoch": 0.9655172413793104,
104
+ "grad_norm": 18.257474899291992,
105
+ "learning_rate": 3.8356164383561644e-05,
106
+ "loss": 1.9719,
107
+ "step": 28
108
+ },
109
+ {
110
+ "epoch": 1.0,
111
+ "eval_accuracy": 0.2196969696969697,
112
+ "eval_f1_macro": 0.09493363779078065,
113
+ "eval_f1_micro": 0.2196969696969697,
114
+ "eval_f1_weighted": 0.11546937910574274,
115
+ "eval_loss": 1.9209331274032593,
116
+ "eval_precision_macro": 0.08906952611553096,
117
+ "eval_precision_micro": 0.2196969696969697,
118
+ "eval_precision_weighted": 0.10509758602978943,
119
+ "eval_recall_macro": 0.17224489795918368,
120
+ "eval_recall_micro": 0.2196969696969697,
121
+ "eval_recall_weighted": 0.2196969696969697,
122
+ "eval_runtime": 2.2105,
123
+ "eval_samples_per_second": 59.716,
124
+ "eval_steps_per_second": 7.691,
125
+ "step": 29
126
+ },
127
+ {
128
+ "epoch": 1.0344827586206897,
129
+ "grad_norm": 28.444780349731445,
130
+ "learning_rate": 4.1095890410958905e-05,
131
+ "loss": 1.8292,
132
+ "step": 30
133
+ },
134
+ {
135
+ "epoch": 1.103448275862069,
136
+ "grad_norm": 16.733516693115234,
137
+ "learning_rate": 4.383561643835617e-05,
138
+ "loss": 1.8189,
139
+ "step": 32
140
+ },
141
+ {
142
+ "epoch": 1.1724137931034484,
143
+ "grad_norm": 16.255664825439453,
144
+ "learning_rate": 4.657534246575342e-05,
145
+ "loss": 1.8846,
146
+ "step": 34
147
+ },
148
+ {
149
+ "epoch": 1.2413793103448276,
150
+ "grad_norm": 15.87771987915039,
151
+ "learning_rate": 4.9315068493150684e-05,
152
+ "loss": 1.9816,
153
+ "step": 36
154
+ },
155
+ {
156
+ "epoch": 1.3103448275862069,
157
+ "grad_norm": 20.748777389526367,
158
+ "learning_rate": 5.2054794520547945e-05,
159
+ "loss": 1.8633,
160
+ "step": 38
161
+ },
162
+ {
163
+ "epoch": 1.3793103448275863,
164
+ "grad_norm": 23.478870391845703,
165
+ "learning_rate": 5.479452054794521e-05,
166
+ "loss": 1.9196,
167
+ "step": 40
168
+ },
169
+ {
170
+ "epoch": 1.4482758620689655,
171
+ "grad_norm": 14.190969467163086,
172
+ "learning_rate": 5.753424657534247e-05,
173
+ "loss": 1.8152,
174
+ "step": 42
175
+ },
176
+ {
177
+ "epoch": 1.5172413793103448,
178
+ "grad_norm": 27.225452423095703,
179
+ "learning_rate": 6.0273972602739724e-05,
180
+ "loss": 1.879,
181
+ "step": 44
182
+ },
183
+ {
184
+ "epoch": 1.5862068965517242,
185
+ "grad_norm": 16.791467666625977,
186
+ "learning_rate": 6.301369863013699e-05,
187
+ "loss": 1.851,
188
+ "step": 46
189
+ },
190
+ {
191
+ "epoch": 1.6551724137931034,
192
+ "grad_norm": 17.566041946411133,
193
+ "learning_rate": 6.575342465753424e-05,
194
+ "loss": 1.8164,
195
+ "step": 48
196
+ },
197
+ {
198
+ "epoch": 1.7241379310344827,
199
+ "grad_norm": 24.76405906677246,
200
+ "learning_rate": 6.84931506849315e-05,
201
+ "loss": 2.1271,
202
+ "step": 50
203
+ },
204
+ {
205
+ "epoch": 1.793103448275862,
206
+ "grad_norm": 21.847782135009766,
207
+ "learning_rate": 7.123287671232876e-05,
208
+ "loss": 1.7536,
209
+ "step": 52
210
+ },
211
+ {
212
+ "epoch": 1.8620689655172413,
213
+ "grad_norm": 19.893037796020508,
214
+ "learning_rate": 7.397260273972603e-05,
215
+ "loss": 1.7816,
216
+ "step": 54
217
+ },
218
+ {
219
+ "epoch": 1.9310344827586206,
220
+ "grad_norm": 14.866455078125,
221
+ "learning_rate": 7.671232876712329e-05,
222
+ "loss": 1.5881,
223
+ "step": 56
224
+ },
225
+ {
226
+ "epoch": 2.0,
227
+ "grad_norm": 20.373685836791992,
228
+ "learning_rate": 7.945205479452055e-05,
229
+ "loss": 1.8717,
230
+ "step": 58
231
+ },
232
+ {
233
+ "epoch": 2.0,
234
+ "eval_accuracy": 0.19696969696969696,
235
+ "eval_f1_macro": 0.09534116676973821,
236
+ "eval_f1_micro": 0.19696969696969696,
237
+ "eval_f1_weighted": 0.10689689098780009,
238
+ "eval_loss": 2.0377984046936035,
239
+ "eval_precision_macro": 0.19963369963369962,
240
+ "eval_precision_micro": 0.19696969696969696,
241
+ "eval_precision_weighted": 0.266025641025641,
242
+ "eval_recall_macro": 0.17944066515495088,
243
+ "eval_recall_micro": 0.19696969696969696,
244
+ "eval_recall_weighted": 0.19696969696969696,
245
+ "eval_runtime": 2.1913,
246
+ "eval_samples_per_second": 60.238,
247
+ "eval_steps_per_second": 7.758,
248
+ "step": 58
249
+ },
250
+ {
251
+ "epoch": 2.0689655172413794,
252
+ "grad_norm": 21.545093536376953,
253
+ "learning_rate": 8.219178082191781e-05,
254
+ "loss": 1.752,
255
+ "step": 60
256
+ },
257
+ {
258
+ "epoch": 2.1379310344827585,
259
+ "grad_norm": 15.046628952026367,
260
+ "learning_rate": 8.493150684931507e-05,
261
+ "loss": 1.7137,
262
+ "step": 62
263
+ },
264
+ {
265
+ "epoch": 2.206896551724138,
266
+ "grad_norm": 21.083383560180664,
267
+ "learning_rate": 8.767123287671233e-05,
268
+ "loss": 1.7003,
269
+ "step": 64
270
+ },
271
+ {
272
+ "epoch": 2.2758620689655173,
273
+ "grad_norm": 37.4631462097168,
274
+ "learning_rate": 9.041095890410958e-05,
275
+ "loss": 1.9736,
276
+ "step": 66
277
+ },
278
+ {
279
+ "epoch": 2.344827586206897,
280
+ "grad_norm": 19.171171188354492,
281
+ "learning_rate": 9.315068493150684e-05,
282
+ "loss": 1.7332,
283
+ "step": 68
284
+ },
285
+ {
286
+ "epoch": 2.413793103448276,
287
+ "grad_norm": 16.743974685668945,
288
+ "learning_rate": 9.58904109589041e-05,
289
+ "loss": 1.819,
290
+ "step": 70
291
+ },
292
+ {
293
+ "epoch": 2.4827586206896552,
294
+ "grad_norm": 20.756649017333984,
295
+ "learning_rate": 9.863013698630137e-05,
296
+ "loss": 1.9201,
297
+ "step": 72
298
+ },
299
+ {
300
+ "epoch": 2.5517241379310347,
301
+ "grad_norm": 17.703954696655273,
302
+ "learning_rate": 9.984662576687117e-05,
303
+ "loss": 1.6147,
304
+ "step": 74
305
+ },
306
+ {
307
+ "epoch": 2.6206896551724137,
308
+ "grad_norm": 32.903743743896484,
309
+ "learning_rate": 9.95398773006135e-05,
310
+ "loss": 2.0663,
311
+ "step": 76
312
+ },
313
+ {
314
+ "epoch": 2.689655172413793,
315
+ "grad_norm": 23.617671966552734,
316
+ "learning_rate": 9.923312883435584e-05,
317
+ "loss": 1.9193,
318
+ "step": 78
319
+ },
320
+ {
321
+ "epoch": 2.7586206896551726,
322
+ "grad_norm": 30.0602970123291,
323
+ "learning_rate": 9.892638036809816e-05,
324
+ "loss": 1.7659,
325
+ "step": 80
326
+ },
327
+ {
328
+ "epoch": 2.8275862068965516,
329
+ "grad_norm": 22.806859970092773,
330
+ "learning_rate": 9.861963190184049e-05,
331
+ "loss": 1.4494,
332
+ "step": 82
333
+ },
334
+ {
335
+ "epoch": 2.896551724137931,
336
+ "grad_norm": 16.40064811706543,
337
+ "learning_rate": 9.831288343558283e-05,
338
+ "loss": 1.6664,
339
+ "step": 84
340
+ },
341
+ {
342
+ "epoch": 2.9655172413793105,
343
+ "grad_norm": 24.703716278076172,
344
+ "learning_rate": 9.800613496932515e-05,
345
+ "loss": 1.9326,
346
+ "step": 86
347
+ },
348
+ {
349
+ "epoch": 3.0,
350
+ "eval_accuracy": 0.3939393939393939,
351
+ "eval_f1_macro": 0.2290203682353352,
352
+ "eval_f1_micro": 0.3939393939393939,
353
+ "eval_f1_weighted": 0.2938602829468567,
354
+ "eval_loss": 1.7679647207260132,
355
+ "eval_precision_macro": 0.21510735641170425,
356
+ "eval_precision_micro": 0.3939393939393939,
357
+ "eval_precision_weighted": 0.268160089800406,
358
+ "eval_recall_macro": 0.30038548752834465,
359
+ "eval_recall_micro": 0.3939393939393939,
360
+ "eval_recall_weighted": 0.3939393939393939,
361
+ "eval_runtime": 2.411,
362
+ "eval_samples_per_second": 54.749,
363
+ "eval_steps_per_second": 7.051,
364
+ "step": 87
365
+ },
366
+ {
367
+ "epoch": 3.0344827586206895,
368
+ "grad_norm": 34.48782730102539,
369
+ "learning_rate": 9.76993865030675e-05,
370
+ "loss": 1.9289,
371
+ "step": 88
372
+ },
373
+ {
374
+ "epoch": 3.103448275862069,
375
+ "grad_norm": 20.898527145385742,
376
+ "learning_rate": 9.739263803680982e-05,
377
+ "loss": 1.5641,
378
+ "step": 90
379
+ },
380
+ {
381
+ "epoch": 3.1724137931034484,
382
+ "grad_norm": 20.089338302612305,
383
+ "learning_rate": 9.708588957055215e-05,
384
+ "loss": 1.6451,
385
+ "step": 92
386
+ },
387
+ {
388
+ "epoch": 3.2413793103448274,
389
+ "grad_norm": 21.63447380065918,
390
+ "learning_rate": 9.677914110429448e-05,
391
+ "loss": 1.5942,
392
+ "step": 94
393
+ },
394
+ {
395
+ "epoch": 3.310344827586207,
396
+ "grad_norm": 18.611299514770508,
397
+ "learning_rate": 9.647239263803681e-05,
398
+ "loss": 1.4619,
399
+ "step": 96
400
+ },
401
+ {
402
+ "epoch": 3.3793103448275863,
403
+ "grad_norm": 14.333738327026367,
404
+ "learning_rate": 9.616564417177915e-05,
405
+ "loss": 1.6001,
406
+ "step": 98
407
+ },
408
+ {
409
+ "epoch": 3.4482758620689653,
410
+ "grad_norm": 16.133724212646484,
411
+ "learning_rate": 9.585889570552147e-05,
412
+ "loss": 1.3741,
413
+ "step": 100
414
+ },
415
+ {
416
+ "epoch": 3.5172413793103448,
417
+ "grad_norm": 29.28097915649414,
418
+ "learning_rate": 9.555214723926381e-05,
419
+ "loss": 1.6927,
420
+ "step": 102
421
+ },
422
+ {
423
+ "epoch": 3.586206896551724,
424
+ "grad_norm": 17.335416793823242,
425
+ "learning_rate": 9.524539877300614e-05,
426
+ "loss": 1.1759,
427
+ "step": 104
428
+ },
429
+ {
430
+ "epoch": 3.655172413793103,
431
+ "grad_norm": 25.239665985107422,
432
+ "learning_rate": 9.493865030674846e-05,
433
+ "loss": 1.4696,
434
+ "step": 106
435
+ },
436
+ {
437
+ "epoch": 3.7241379310344827,
438
+ "grad_norm": 19.4155330657959,
439
+ "learning_rate": 9.46319018404908e-05,
440
+ "loss": 1.0003,
441
+ "step": 108
442
+ },
443
+ {
444
+ "epoch": 3.793103448275862,
445
+ "grad_norm": 20.292905807495117,
446
+ "learning_rate": 9.432515337423313e-05,
447
+ "loss": 1.3851,
448
+ "step": 110
449
+ },
450
+ {
451
+ "epoch": 3.862068965517241,
452
+ "grad_norm": 31.247602462768555,
453
+ "learning_rate": 9.401840490797547e-05,
454
+ "loss": 1.6519,
455
+ "step": 112
456
+ },
457
+ {
458
+ "epoch": 3.9310344827586206,
459
+ "grad_norm": 25.56925392150879,
460
+ "learning_rate": 9.37116564417178e-05,
461
+ "loss": 1.4111,
462
+ "step": 114
463
+ },
464
+ {
465
+ "epoch": 4.0,
466
+ "grad_norm": 23.311908721923828,
467
+ "learning_rate": 9.340490797546013e-05,
468
+ "loss": 1.2873,
469
+ "step": 116
470
+ },
471
+ {
472
+ "epoch": 4.0,
473
+ "eval_accuracy": 0.44696969696969696,
474
+ "eval_f1_macro": 0.35023855102631046,
475
+ "eval_f1_micro": 0.44696969696969696,
476
+ "eval_f1_weighted": 0.40824342918240536,
477
+ "eval_loss": 1.5892395973205566,
478
+ "eval_precision_macro": 0.4831240188383045,
479
+ "eval_precision_micro": 0.44696969696969696,
480
+ "eval_precision_weighted": 0.513986013986014,
481
+ "eval_recall_macro": 0.36461829176114885,
482
+ "eval_recall_micro": 0.44696969696969696,
483
+ "eval_recall_weighted": 0.44696969696969696,
484
+ "eval_runtime": 2.1869,
485
+ "eval_samples_per_second": 60.359,
486
+ "eval_steps_per_second": 7.774,
487
+ "step": 116
488
+ },
489
+ {
490
+ "epoch": 4.068965517241379,
491
+ "grad_norm": 31.80891227722168,
492
+ "learning_rate": 9.309815950920246e-05,
493
+ "loss": 1.2896,
494
+ "step": 118
495
+ },
496
+ {
497
+ "epoch": 4.137931034482759,
498
+ "grad_norm": 22.716781616210938,
499
+ "learning_rate": 9.279141104294478e-05,
500
+ "loss": 1.2486,
501
+ "step": 120
502
+ },
503
+ {
504
+ "epoch": 4.206896551724138,
505
+ "grad_norm": 23.80237579345703,
506
+ "learning_rate": 9.248466257668712e-05,
507
+ "loss": 1.3835,
508
+ "step": 122
509
+ },
510
+ {
511
+ "epoch": 4.275862068965517,
512
+ "grad_norm": 18.24744987487793,
513
+ "learning_rate": 9.217791411042945e-05,
514
+ "loss": 0.9575,
515
+ "step": 124
516
+ },
517
+ {
518
+ "epoch": 4.344827586206897,
519
+ "grad_norm": 22.790117263793945,
520
+ "learning_rate": 9.187116564417179e-05,
521
+ "loss": 1.3711,
522
+ "step": 126
523
+ },
524
+ {
525
+ "epoch": 4.413793103448276,
526
+ "grad_norm": 15.804698944091797,
527
+ "learning_rate": 9.156441717791411e-05,
528
+ "loss": 1.4226,
529
+ "step": 128
530
+ },
531
+ {
532
+ "epoch": 4.482758620689655,
533
+ "grad_norm": 30.167261123657227,
534
+ "learning_rate": 9.125766871165644e-05,
535
+ "loss": 1.7736,
536
+ "step": 130
537
+ },
538
+ {
539
+ "epoch": 4.551724137931035,
540
+ "grad_norm": 15.975086212158203,
541
+ "learning_rate": 9.095092024539878e-05,
542
+ "loss": 1.4395,
543
+ "step": 132
544
+ },
545
+ {
546
+ "epoch": 4.620689655172414,
547
+ "grad_norm": 23.406415939331055,
548
+ "learning_rate": 9.06441717791411e-05,
549
+ "loss": 1.6491,
550
+ "step": 134
551
+ },
552
+ {
553
+ "epoch": 4.689655172413794,
554
+ "grad_norm": 30.07583236694336,
555
+ "learning_rate": 9.033742331288344e-05,
556
+ "loss": 1.747,
557
+ "step": 136
558
+ },
559
+ {
560
+ "epoch": 4.758620689655173,
561
+ "grad_norm": 20.77846336364746,
562
+ "learning_rate": 9.003067484662577e-05,
563
+ "loss": 1.6648,
564
+ "step": 138
565
+ },
566
+ {
567
+ "epoch": 4.827586206896552,
568
+ "grad_norm": 11.187516212463379,
569
+ "learning_rate": 8.972392638036811e-05,
570
+ "loss": 1.1932,
571
+ "step": 140
572
+ },
573
+ {
574
+ "epoch": 4.896551724137931,
575
+ "grad_norm": 15.845014572143555,
576
+ "learning_rate": 8.941717791411043e-05,
577
+ "loss": 1.4271,
578
+ "step": 142
579
+ },
580
+ {
581
+ "epoch": 4.9655172413793105,
582
+ "grad_norm": 15.977095603942871,
583
+ "learning_rate": 8.911042944785276e-05,
584
+ "loss": 1.3997,
585
+ "step": 144
586
+ },
587
+ {
588
+ "epoch": 5.0,
589
+ "eval_accuracy": 0.5,
590
+ "eval_f1_macro": 0.34814087704047136,
591
+ "eval_f1_micro": 0.5,
592
+ "eval_f1_weighted": 0.4244756131292643,
593
+ "eval_loss": 1.4773013591766357,
594
+ "eval_precision_macro": 0.34626430480089015,
595
+ "eval_precision_micro": 0.5,
596
+ "eval_precision_weighted": 0.41194737757930666,
597
+ "eval_recall_macro": 0.4051700680272109,
598
+ "eval_recall_micro": 0.5,
599
+ "eval_recall_weighted": 0.5,
600
+ "eval_runtime": 2.2186,
601
+ "eval_samples_per_second": 59.497,
602
+ "eval_steps_per_second": 7.662,
603
+ "step": 145
604
+ },
605
+ {
606
+ "epoch": 5.0344827586206895,
607
+ "grad_norm": 18.65735626220703,
608
+ "learning_rate": 8.88036809815951e-05,
609
+ "loss": 1.6883,
610
+ "step": 146
611
+ },
612
+ {
613
+ "epoch": 5.103448275862069,
614
+ "grad_norm": 18.897695541381836,
615
+ "learning_rate": 8.849693251533742e-05,
616
+ "loss": 1.3035,
617
+ "step": 148
618
+ },
619
+ {
620
+ "epoch": 5.172413793103448,
621
+ "grad_norm": 20.19015884399414,
622
+ "learning_rate": 8.819018404907976e-05,
623
+ "loss": 1.1881,
624
+ "step": 150
625
+ },
626
+ {
627
+ "epoch": 5.241379310344827,
628
+ "grad_norm": 18.574827194213867,
629
+ "learning_rate": 8.788343558282209e-05,
630
+ "loss": 1.0471,
631
+ "step": 152
632
+ },
633
+ {
634
+ "epoch": 5.310344827586207,
635
+ "grad_norm": 15.6314115524292,
636
+ "learning_rate": 8.757668711656443e-05,
637
+ "loss": 1.2302,
638
+ "step": 154
639
+ },
640
+ {
641
+ "epoch": 5.379310344827586,
642
+ "grad_norm": 20.703832626342773,
643
+ "learning_rate": 8.726993865030675e-05,
644
+ "loss": 1.1877,
645
+ "step": 156
646
+ },
647
+ {
648
+ "epoch": 5.448275862068965,
649
+ "grad_norm": 28.778301239013672,
650
+ "learning_rate": 8.696319018404908e-05,
651
+ "loss": 1.5936,
652
+ "step": 158
653
+ },
654
+ {
655
+ "epoch": 5.517241379310345,
656
+ "grad_norm": 25.026071548461914,
657
+ "learning_rate": 8.665644171779142e-05,
658
+ "loss": 1.0255,
659
+ "step": 160
660
+ },
661
+ {
662
+ "epoch": 5.586206896551724,
663
+ "grad_norm": 17.47880744934082,
664
+ "learning_rate": 8.634969325153374e-05,
665
+ "loss": 1.3967,
666
+ "step": 162
667
+ },
668
+ {
669
+ "epoch": 5.655172413793103,
670
+ "grad_norm": 20.092926025390625,
671
+ "learning_rate": 8.604294478527608e-05,
672
+ "loss": 1.2006,
673
+ "step": 164
674
+ },
675
+ {
676
+ "epoch": 5.724137931034483,
677
+ "grad_norm": 17.11634635925293,
678
+ "learning_rate": 8.573619631901841e-05,
679
+ "loss": 1.1081,
680
+ "step": 166
681
+ },
682
+ {
683
+ "epoch": 5.793103448275862,
684
+ "grad_norm": 21.26296043395996,
685
+ "learning_rate": 8.542944785276073e-05,
686
+ "loss": 1.0949,
687
+ "step": 168
688
+ },
689
+ {
690
+ "epoch": 5.862068965517241,
691
+ "grad_norm": 31.731033325195312,
692
+ "learning_rate": 8.512269938650307e-05,
693
+ "loss": 1.3502,
694
+ "step": 170
695
+ },
696
+ {
697
+ "epoch": 5.931034482758621,
698
+ "grad_norm": 25.80246353149414,
699
+ "learning_rate": 8.48159509202454e-05,
700
+ "loss": 1.5673,
701
+ "step": 172
702
+ },
703
+ {
704
+ "epoch": 6.0,
705
+ "grad_norm": 23.60406494140625,
706
+ "learning_rate": 8.450920245398774e-05,
707
+ "loss": 1.7041,
708
+ "step": 174
709
+ },
710
+ {
711
+ "epoch": 6.0,
712
+ "eval_accuracy": 0.5378787878787878,
713
+ "eval_f1_macro": 0.4265559579914266,
714
+ "eval_f1_micro": 0.5378787878787878,
715
+ "eval_f1_weighted": 0.5005041399030636,
716
+ "eval_loss": 1.4405734539031982,
717
+ "eval_precision_macro": 0.5010893868036724,
718
+ "eval_precision_micro": 0.5378787878787878,
719
+ "eval_precision_weighted": 0.5628245491881856,
720
+ "eval_recall_macro": 0.4529024943310657,
721
+ "eval_recall_micro": 0.5378787878787878,
722
+ "eval_recall_weighted": 0.5378787878787878,
723
+ "eval_runtime": 2.215,
724
+ "eval_samples_per_second": 59.594,
725
+ "eval_steps_per_second": 7.675,
726
+ "step": 174
727
+ },
728
+ {
729
+ "epoch": 6.068965517241379,
730
+ "grad_norm": 20.430105209350586,
731
+ "learning_rate": 8.420245398773006e-05,
732
+ "loss": 1.0018,
733
+ "step": 176
734
+ },
735
+ {
736
+ "epoch": 6.137931034482759,
737
+ "grad_norm": 14.565896987915039,
738
+ "learning_rate": 8.38957055214724e-05,
739
+ "loss": 1.02,
740
+ "step": 178
741
+ },
742
+ {
743
+ "epoch": 6.206896551724138,
744
+ "grad_norm": 21.370939254760742,
745
+ "learning_rate": 8.358895705521473e-05,
746
+ "loss": 1.1403,
747
+ "step": 180
748
+ },
749
+ {
750
+ "epoch": 6.275862068965517,
751
+ "grad_norm": 16.42601776123047,
752
+ "learning_rate": 8.328220858895705e-05,
753
+ "loss": 1.4774,
754
+ "step": 182
755
+ },
756
+ {
757
+ "epoch": 6.344827586206897,
758
+ "grad_norm": 18.844532012939453,
759
+ "learning_rate": 8.297546012269939e-05,
760
+ "loss": 1.1568,
761
+ "step": 184
762
+ },
763
+ {
764
+ "epoch": 6.413793103448276,
765
+ "grad_norm": 12.009956359863281,
766
+ "learning_rate": 8.266871165644172e-05,
767
+ "loss": 0.8588,
768
+ "step": 186
769
+ },
770
+ {
771
+ "epoch": 6.482758620689655,
772
+ "grad_norm": 21.886213302612305,
773
+ "learning_rate": 8.236196319018406e-05,
774
+ "loss": 0.8875,
775
+ "step": 188
776
+ },
777
+ {
778
+ "epoch": 6.551724137931035,
779
+ "grad_norm": 16.68918800354004,
780
+ "learning_rate": 8.205521472392638e-05,
781
+ "loss": 1.2283,
782
+ "step": 190
783
+ },
784
+ {
785
+ "epoch": 6.620689655172414,
786
+ "grad_norm": 19.978803634643555,
787
+ "learning_rate": 8.174846625766872e-05,
788
+ "loss": 1.0841,
789
+ "step": 192
790
+ },
791
+ {
792
+ "epoch": 6.689655172413794,
793
+ "grad_norm": 16.348190307617188,
794
+ "learning_rate": 8.144171779141105e-05,
795
+ "loss": 0.9849,
796
+ "step": 194
797
+ },
798
+ {
799
+ "epoch": 6.758620689655173,
800
+ "grad_norm": 21.0911865234375,
801
+ "learning_rate": 8.113496932515337e-05,
802
+ "loss": 1.6812,
803
+ "step": 196
804
+ },
805
+ {
806
+ "epoch": 6.827586206896552,
807
+ "grad_norm": 17.614559173583984,
808
+ "learning_rate": 8.082822085889571e-05,
809
+ "loss": 1.0849,
810
+ "step": 198
811
+ },
812
+ {
813
+ "epoch": 6.896551724137931,
814
+ "grad_norm": 19.585248947143555,
815
+ "learning_rate": 8.052147239263804e-05,
816
+ "loss": 0.9886,
817
+ "step": 200
818
+ },
819
+ {
820
+ "epoch": 6.9655172413793105,
821
+ "grad_norm": 19.765750885009766,
822
+ "learning_rate": 8.021472392638038e-05,
823
+ "loss": 1.1863,
824
+ "step": 202
825
+ },
826
+ {
827
+ "epoch": 7.0,
828
+ "eval_accuracy": 0.5681818181818182,
829
+ "eval_f1_macro": 0.4758501564659626,
830
+ "eval_f1_micro": 0.5681818181818182,
831
+ "eval_f1_weighted": 0.5400274091359906,
832
+ "eval_loss": 1.3679978847503662,
833
+ "eval_precision_macro": 0.5558842701699845,
834
+ "eval_precision_micro": 0.5681818181818182,
835
+ "eval_precision_weighted": 0.603162194071285,
836
+ "eval_recall_macro": 0.4831443688586546,
837
+ "eval_recall_micro": 0.5681818181818182,
838
+ "eval_recall_weighted": 0.5681818181818182,
839
+ "eval_runtime": 2.1971,
840
+ "eval_samples_per_second": 60.078,
841
+ "eval_steps_per_second": 7.737,
842
+ "step": 203
843
+ },
844
+ {
845
+ "epoch": 7.0344827586206895,
846
+ "grad_norm": 12.333309173583984,
847
+ "learning_rate": 7.99079754601227e-05,
848
+ "loss": 0.8463,
849
+ "step": 204
850
+ },
851
+ {
852
+ "epoch": 7.103448275862069,
853
+ "grad_norm": 21.288188934326172,
854
+ "learning_rate": 7.960122699386503e-05,
855
+ "loss": 0.9617,
856
+ "step": 206
857
+ },
858
+ {
859
+ "epoch": 7.172413793103448,
860
+ "grad_norm": 23.408567428588867,
861
+ "learning_rate": 7.929447852760737e-05,
862
+ "loss": 1.2719,
863
+ "step": 208
864
+ },
865
+ {
866
+ "epoch": 7.241379310344827,
867
+ "grad_norm": 16.838363647460938,
868
+ "learning_rate": 7.898773006134969e-05,
869
+ "loss": 0.9394,
870
+ "step": 210
871
+ },
872
+ {
873
+ "epoch": 7.310344827586207,
874
+ "grad_norm": 19.406648635864258,
875
+ "learning_rate": 7.868098159509203e-05,
876
+ "loss": 1.0913,
877
+ "step": 212
878
+ },
879
+ {
880
+ "epoch": 7.379310344827586,
881
+ "grad_norm": 25.161184310913086,
882
+ "learning_rate": 7.837423312883436e-05,
883
+ "loss": 1.029,
884
+ "step": 214
885
+ },
886
+ {
887
+ "epoch": 7.448275862068965,
888
+ "grad_norm": 24.920177459716797,
889
+ "learning_rate": 7.80674846625767e-05,
890
+ "loss": 1.3211,
891
+ "step": 216
892
+ },
893
+ {
894
+ "epoch": 7.517241379310345,
895
+ "grad_norm": 22.075044631958008,
896
+ "learning_rate": 7.776073619631902e-05,
897
+ "loss": 1.226,
898
+ "step": 218
899
+ },
900
+ {
901
+ "epoch": 7.586206896551724,
902
+ "grad_norm": 17.07358169555664,
903
+ "learning_rate": 7.745398773006135e-05,
904
+ "loss": 0.8096,
905
+ "step": 220
906
+ },
907
+ {
908
+ "epoch": 7.655172413793103,
909
+ "grad_norm": 23.150299072265625,
910
+ "learning_rate": 7.714723926380369e-05,
911
+ "loss": 1.0163,
912
+ "step": 222
913
+ },
914
+ {
915
+ "epoch": 7.724137931034483,
916
+ "grad_norm": 19.737802505493164,
917
+ "learning_rate": 7.684049079754601e-05,
918
+ "loss": 1.0773,
919
+ "step": 224
920
+ },
921
+ {
922
+ "epoch": 7.793103448275862,
923
+ "grad_norm": 25.407928466796875,
924
+ "learning_rate": 7.653374233128835e-05,
925
+ "loss": 1.2907,
926
+ "step": 226
927
+ },
928
+ {
929
+ "epoch": 7.862068965517241,
930
+ "grad_norm": 14.86108112335205,
931
+ "learning_rate": 7.622699386503068e-05,
932
+ "loss": 0.9851,
933
+ "step": 228
934
+ },
935
+ {
936
+ "epoch": 7.931034482758621,
937
+ "grad_norm": 16.41703987121582,
938
+ "learning_rate": 7.5920245398773e-05,
939
+ "loss": 1.405,
940
+ "step": 230
941
+ },
942
+ {
943
+ "epoch": 8.0,
944
+ "grad_norm": 20.147233963012695,
945
+ "learning_rate": 7.561349693251534e-05,
946
+ "loss": 0.9817,
947
+ "step": 232
948
+ },
949
+ {
950
+ "epoch": 8.0,
951
+ "eval_accuracy": 0.5227272727272727,
952
+ "eval_f1_macro": 0.43986070618723677,
953
+ "eval_f1_micro": 0.5227272727272727,
954
+ "eval_f1_weighted": 0.4968812272383701,
955
+ "eval_loss": 1.3514596223831177,
956
+ "eval_precision_macro": 0.44448009061813915,
957
+ "eval_precision_micro": 0.5227272727272727,
958
+ "eval_precision_weighted": 0.5088011746058729,
959
+ "eval_recall_macro": 0.47222222222222215,
960
+ "eval_recall_micro": 0.5227272727272727,
961
+ "eval_recall_weighted": 0.5227272727272727,
962
+ "eval_runtime": 2.1836,
963
+ "eval_samples_per_second": 60.451,
964
+ "eval_steps_per_second": 7.785,
965
+ "step": 232
966
+ },
967
+ {
968
+ "epoch": 8.068965517241379,
969
+ "grad_norm": 19.058284759521484,
970
+ "learning_rate": 7.530674846625767e-05,
971
+ "loss": 0.7432,
972
+ "step": 234
973
+ },
974
+ {
975
+ "epoch": 8.137931034482758,
976
+ "grad_norm": 15.031048774719238,
977
+ "learning_rate": 7.500000000000001e-05,
978
+ "loss": 0.9929,
979
+ "step": 236
980
+ },
981
+ {
982
+ "epoch": 8.206896551724139,
983
+ "grad_norm": 22.36937713623047,
984
+ "learning_rate": 7.469325153374233e-05,
985
+ "loss": 1.121,
986
+ "step": 238
987
+ },
988
+ {
989
+ "epoch": 8.275862068965518,
990
+ "grad_norm": 20.049163818359375,
991
+ "learning_rate": 7.438650306748467e-05,
992
+ "loss": 1.0773,
993
+ "step": 240
994
+ },
995
+ {
996
+ "epoch": 8.344827586206897,
997
+ "grad_norm": 17.548959732055664,
998
+ "learning_rate": 7.4079754601227e-05,
999
+ "loss": 0.6477,
1000
+ "step": 242
1001
+ },
1002
+ {
1003
+ "epoch": 8.413793103448276,
1004
+ "grad_norm": 25.496204376220703,
1005
+ "learning_rate": 7.377300613496932e-05,
1006
+ "loss": 0.8577,
1007
+ "step": 244
1008
+ },
1009
+ {
1010
+ "epoch": 8.482758620689655,
1011
+ "grad_norm": 22.851713180541992,
1012
+ "learning_rate": 7.346625766871166e-05,
1013
+ "loss": 1.0034,
1014
+ "step": 246
1015
+ },
1016
+ {
1017
+ "epoch": 8.551724137931034,
1018
+ "grad_norm": 26.218107223510742,
1019
+ "learning_rate": 7.315950920245399e-05,
1020
+ "loss": 0.7915,
1021
+ "step": 248
1022
+ },
1023
+ {
1024
+ "epoch": 8.620689655172415,
1025
+ "grad_norm": 18.867645263671875,
1026
+ "learning_rate": 7.285276073619633e-05,
1027
+ "loss": 1.0731,
1028
+ "step": 250
1029
+ },
1030
+ {
1031
+ "epoch": 8.689655172413794,
1032
+ "grad_norm": 16.624637603759766,
1033
+ "learning_rate": 7.254601226993865e-05,
1034
+ "loss": 0.706,
1035
+ "step": 252
1036
+ },
1037
+ {
1038
+ "epoch": 8.758620689655173,
1039
+ "grad_norm": 26.590402603149414,
1040
+ "learning_rate": 7.223926380368099e-05,
1041
+ "loss": 1.1713,
1042
+ "step": 254
1043
+ },
1044
+ {
1045
+ "epoch": 8.827586206896552,
1046
+ "grad_norm": 26.262710571289062,
1047
+ "learning_rate": 7.193251533742332e-05,
1048
+ "loss": 0.8558,
1049
+ "step": 256
1050
+ },
1051
+ {
1052
+ "epoch": 8.89655172413793,
1053
+ "grad_norm": 24.299407958984375,
1054
+ "learning_rate": 7.162576687116564e-05,
1055
+ "loss": 1.2063,
1056
+ "step": 258
1057
+ },
1058
+ {
1059
+ "epoch": 8.96551724137931,
1060
+ "grad_norm": 14.688630104064941,
1061
+ "learning_rate": 7.131901840490798e-05,
1062
+ "loss": 0.617,
1063
+ "step": 260
1064
+ },
1065
+ {
1066
+ "epoch": 9.0,
1067
+ "eval_accuracy": 0.5909090909090909,
1068
+ "eval_f1_macro": 0.48949553001277135,
1069
+ "eval_f1_micro": 0.5909090909090909,
1070
+ "eval_f1_weighted": 0.5554842002399473,
1071
+ "eval_loss": 1.3866709470748901,
1072
+ "eval_precision_macro": 0.5135558290637433,
1073
+ "eval_precision_micro": 0.5909090909090909,
1074
+ "eval_precision_weighted": 0.5775828309138267,
1075
+ "eval_recall_macro": 0.5183068783068784,
1076
+ "eval_recall_micro": 0.5909090909090909,
1077
+ "eval_recall_weighted": 0.5909090909090909,
1078
+ "eval_runtime": 2.1836,
1079
+ "eval_samples_per_second": 60.452,
1080
+ "eval_steps_per_second": 7.785,
1081
+ "step": 261
1082
+ },
1083
+ {
1084
+ "epoch": 9.03448275862069,
1085
+ "grad_norm": 14.76333236694336,
1086
+ "learning_rate": 7.101226993865031e-05,
1087
+ "loss": 0.5944,
1088
+ "step": 262
1089
+ },
1090
+ {
1091
+ "epoch": 9.10344827586207,
1092
+ "grad_norm": 24.155582427978516,
1093
+ "learning_rate": 7.070552147239265e-05,
1094
+ "loss": 0.7582,
1095
+ "step": 264
1096
+ },
1097
+ {
1098
+ "epoch": 9.172413793103448,
1099
+ "grad_norm": 28.48207664489746,
1100
+ "learning_rate": 7.039877300613497e-05,
1101
+ "loss": 0.8912,
1102
+ "step": 266
1103
+ },
1104
+ {
1105
+ "epoch": 9.241379310344827,
1106
+ "grad_norm": 10.216355323791504,
1107
+ "learning_rate": 7.00920245398773e-05,
1108
+ "loss": 0.6462,
1109
+ "step": 268
1110
+ },
1111
+ {
1112
+ "epoch": 9.310344827586206,
1113
+ "grad_norm": 26.633636474609375,
1114
+ "learning_rate": 6.978527607361964e-05,
1115
+ "loss": 0.9824,
1116
+ "step": 270
1117
+ },
1118
+ {
1119
+ "epoch": 9.379310344827585,
1120
+ "grad_norm": 24.09172821044922,
1121
+ "learning_rate": 6.947852760736196e-05,
1122
+ "loss": 0.6394,
1123
+ "step": 272
1124
+ },
1125
+ {
1126
+ "epoch": 9.448275862068966,
1127
+ "grad_norm": 27.006250381469727,
1128
+ "learning_rate": 6.91717791411043e-05,
1129
+ "loss": 0.9335,
1130
+ "step": 274
1131
+ },
1132
+ {
1133
+ "epoch": 9.517241379310345,
1134
+ "grad_norm": 22.846731185913086,
1135
+ "learning_rate": 6.886503067484663e-05,
1136
+ "loss": 0.6089,
1137
+ "step": 276
1138
+ },
1139
+ {
1140
+ "epoch": 9.586206896551724,
1141
+ "grad_norm": 19.58112144470215,
1142
+ "learning_rate": 6.855828220858897e-05,
1143
+ "loss": 0.7781,
1144
+ "step": 278
1145
+ },
1146
+ {
1147
+ "epoch": 9.655172413793103,
1148
+ "grad_norm": 13.92150592803955,
1149
+ "learning_rate": 6.825153374233129e-05,
1150
+ "loss": 0.5359,
1151
+ "step": 280
1152
+ },
1153
+ {
1154
+ "epoch": 9.724137931034482,
1155
+ "grad_norm": 31.10985565185547,
1156
+ "learning_rate": 6.794478527607362e-05,
1157
+ "loss": 1.1988,
1158
+ "step": 282
1159
+ },
1160
+ {
1161
+ "epoch": 9.793103448275861,
1162
+ "grad_norm": 23.35214614868164,
1163
+ "learning_rate": 6.763803680981596e-05,
1164
+ "loss": 0.9214,
1165
+ "step": 284
1166
+ },
1167
+ {
1168
+ "epoch": 9.862068965517242,
1169
+ "grad_norm": 28.431663513183594,
1170
+ "learning_rate": 6.733128834355828e-05,
1171
+ "loss": 0.7769,
1172
+ "step": 286
1173
+ },
1174
+ {
1175
+ "epoch": 9.931034482758621,
1176
+ "grad_norm": 26.589706420898438,
1177
+ "learning_rate": 6.702453987730062e-05,
1178
+ "loss": 0.7962,
1179
+ "step": 288
1180
+ },
1181
+ {
1182
+ "epoch": 10.0,
1183
+ "grad_norm": 31.327491760253906,
1184
+ "learning_rate": 6.671779141104295e-05,
1185
+ "loss": 1.0365,
1186
+ "step": 290
1187
+ },
1188
+ {
1189
+ "epoch": 10.0,
1190
+ "eval_accuracy": 0.5378787878787878,
1191
+ "eval_f1_macro": 0.43132712141928736,
1192
+ "eval_f1_micro": 0.5378787878787878,
1193
+ "eval_f1_weighted": 0.49605830250991545,
1194
+ "eval_loss": 1.4607229232788086,
1195
+ "eval_precision_macro": 0.4370555865025911,
1196
+ "eval_precision_micro": 0.5378787878787878,
1197
+ "eval_precision_weighted": 0.4996555594942692,
1198
+ "eval_recall_macro": 0.46741496598639454,
1199
+ "eval_recall_micro": 0.5378787878787878,
1200
+ "eval_recall_weighted": 0.5378787878787878,
1201
+ "eval_runtime": 2.1864,
1202
+ "eval_samples_per_second": 60.372,
1203
+ "eval_steps_per_second": 7.775,
1204
+ "step": 290
1205
+ },
1206
+ {
1207
+ "epoch": 10.068965517241379,
1208
+ "grad_norm": 35.857444763183594,
1209
+ "learning_rate": 6.641104294478529e-05,
1210
+ "loss": 1.1976,
1211
+ "step": 292
1212
+ },
1213
+ {
1214
+ "epoch": 10.137931034482758,
1215
+ "grad_norm": 19.138635635375977,
1216
+ "learning_rate": 6.610429447852761e-05,
1217
+ "loss": 0.775,
1218
+ "step": 294
1219
+ },
1220
+ {
1221
+ "epoch": 10.206896551724139,
1222
+ "grad_norm": 28.3044490814209,
1223
+ "learning_rate": 6.579754601226994e-05,
1224
+ "loss": 0.7642,
1225
+ "step": 296
1226
+ },
1227
+ {
1228
+ "epoch": 10.275862068965518,
1229
+ "grad_norm": 20.905742645263672,
1230
+ "learning_rate": 6.549079754601228e-05,
1231
+ "loss": 0.5362,
1232
+ "step": 298
1233
+ },
1234
+ {
1235
+ "epoch": 10.344827586206897,
1236
+ "grad_norm": 18.387508392333984,
1237
+ "learning_rate": 6.51840490797546e-05,
1238
+ "loss": 0.9838,
1239
+ "step": 300
1240
+ },
1241
+ {
1242
+ "epoch": 10.413793103448276,
1243
+ "grad_norm": 13.722414016723633,
1244
+ "learning_rate": 6.487730061349694e-05,
1245
+ "loss": 0.446,
1246
+ "step": 302
1247
+ },
1248
+ {
1249
+ "epoch": 10.482758620689655,
1250
+ "grad_norm": 20.892261505126953,
1251
+ "learning_rate": 6.457055214723927e-05,
1252
+ "loss": 0.7612,
1253
+ "step": 304
1254
+ },
1255
+ {
1256
+ "epoch": 10.551724137931034,
1257
+ "grad_norm": 13.154946327209473,
1258
+ "learning_rate": 6.426380368098159e-05,
1259
+ "loss": 0.7933,
1260
+ "step": 306
1261
+ },
1262
+ {
1263
+ "epoch": 10.620689655172415,
1264
+ "grad_norm": 16.057727813720703,
1265
+ "learning_rate": 6.395705521472393e-05,
1266
+ "loss": 0.522,
1267
+ "step": 308
1268
+ },
1269
+ {
1270
+ "epoch": 10.689655172413794,
1271
+ "grad_norm": 19.725608825683594,
1272
+ "learning_rate": 6.365030674846626e-05,
1273
+ "loss": 0.7614,
1274
+ "step": 310
1275
+ },
1276
+ {
1277
+ "epoch": 10.758620689655173,
1278
+ "grad_norm": 24.844079971313477,
1279
+ "learning_rate": 6.33435582822086e-05,
1280
+ "loss": 0.7933,
1281
+ "step": 312
1282
+ },
1283
+ {
1284
+ "epoch": 10.827586206896552,
1285
+ "grad_norm": 18.63338279724121,
1286
+ "learning_rate": 6.303680981595092e-05,
1287
+ "loss": 0.7279,
1288
+ "step": 314
1289
+ },
1290
+ {
1291
+ "epoch": 10.89655172413793,
1292
+ "grad_norm": 21.24047088623047,
1293
+ "learning_rate": 6.273006134969326e-05,
1294
+ "loss": 0.5442,
1295
+ "step": 316
1296
+ },
1297
+ {
1298
+ "epoch": 10.96551724137931,
1299
+ "grad_norm": 33.37268829345703,
1300
+ "learning_rate": 6.242331288343559e-05,
1301
+ "loss": 0.6815,
1302
+ "step": 318
1303
+ },
1304
+ {
1305
+ "epoch": 11.0,
1306
+ "eval_accuracy": 0.5909090909090909,
1307
+ "eval_f1_macro": 0.49620898933890345,
1308
+ "eval_f1_micro": 0.5909090909090909,
1309
+ "eval_f1_weighted": 0.5663560659976746,
1310
+ "eval_loss": 1.3132938146591187,
1311
+ "eval_precision_macro": 0.5087159863945577,
1312
+ "eval_precision_micro": 0.5909090909090909,
1313
+ "eval_precision_weighted": 0.5741680194805194,
1314
+ "eval_recall_macro": 0.5132879818594104,
1315
+ "eval_recall_micro": 0.5909090909090909,
1316
+ "eval_recall_weighted": 0.5909090909090909,
1317
+ "eval_runtime": 2.1795,
1318
+ "eval_samples_per_second": 60.565,
1319
+ "eval_steps_per_second": 7.8,
1320
+ "step": 319
1321
+ },
1322
+ {
1323
+ "epoch": 11.03448275862069,
1324
+ "grad_norm": 16.19150733947754,
1325
+ "learning_rate": 6.211656441717791e-05,
1326
+ "loss": 0.644,
1327
+ "step": 320
1328
+ },
1329
+ {
1330
+ "epoch": 11.10344827586207,
1331
+ "grad_norm": 20.564546585083008,
1332
+ "learning_rate": 6.180981595092025e-05,
1333
+ "loss": 0.4998,
1334
+ "step": 322
1335
+ },
1336
+ {
1337
+ "epoch": 11.172413793103448,
1338
+ "grad_norm": 19.2364444732666,
1339
+ "learning_rate": 6.150306748466258e-05,
1340
+ "loss": 0.7469,
1341
+ "step": 324
1342
+ },
1343
+ {
1344
+ "epoch": 11.241379310344827,
1345
+ "grad_norm": 11.9139404296875,
1346
+ "learning_rate": 6.119631901840492e-05,
1347
+ "loss": 0.3421,
1348
+ "step": 326
1349
+ },
1350
+ {
1351
+ "epoch": 11.310344827586206,
1352
+ "grad_norm": 15.564549446105957,
1353
+ "learning_rate": 6.088957055214725e-05,
1354
+ "loss": 0.454,
1355
+ "step": 328
1356
+ },
1357
+ {
1358
+ "epoch": 11.379310344827585,
1359
+ "grad_norm": 15.790903091430664,
1360
+ "learning_rate": 6.058282208588958e-05,
1361
+ "loss": 0.5501,
1362
+ "step": 330
1363
+ },
1364
+ {
1365
+ "epoch": 11.448275862068966,
1366
+ "grad_norm": 20.395984649658203,
1367
+ "learning_rate": 6.02760736196319e-05,
1368
+ "loss": 0.7396,
1369
+ "step": 332
1370
+ },
1371
+ {
1372
+ "epoch": 11.517241379310345,
1373
+ "grad_norm": 13.017558097839355,
1374
+ "learning_rate": 5.996932515337423e-05,
1375
+ "loss": 0.3902,
1376
+ "step": 334
1377
+ },
1378
+ {
1379
+ "epoch": 11.586206896551724,
1380
+ "grad_norm": 16.60504150390625,
1381
+ "learning_rate": 5.9662576687116564e-05,
1382
+ "loss": 0.4817,
1383
+ "step": 336
1384
+ },
1385
+ {
1386
+ "epoch": 11.655172413793103,
1387
+ "grad_norm": 26.98207664489746,
1388
+ "learning_rate": 5.93558282208589e-05,
1389
+ "loss": 0.7637,
1390
+ "step": 338
1391
+ },
1392
+ {
1393
+ "epoch": 11.724137931034482,
1394
+ "grad_norm": 27.36790657043457,
1395
+ "learning_rate": 5.9049079754601235e-05,
1396
+ "loss": 0.7137,
1397
+ "step": 340
1398
+ },
1399
+ {
1400
+ "epoch": 11.793103448275861,
1401
+ "grad_norm": 21.537046432495117,
1402
+ "learning_rate": 5.874233128834357e-05,
1403
+ "loss": 0.5965,
1404
+ "step": 342
1405
+ },
1406
+ {
1407
+ "epoch": 11.862068965517242,
1408
+ "grad_norm": 23.125181198120117,
1409
+ "learning_rate": 5.8435582822085886e-05,
1410
+ "loss": 0.5142,
1411
+ "step": 344
1412
+ },
1413
+ {
1414
+ "epoch": 11.931034482758621,
1415
+ "grad_norm": 24.718408584594727,
1416
+ "learning_rate": 5.812883435582822e-05,
1417
+ "loss": 0.7012,
1418
+ "step": 346
1419
+ },
1420
+ {
1421
+ "epoch": 12.0,
1422
+ "grad_norm": 10.676513671875,
1423
+ "learning_rate": 5.782208588957055e-05,
1424
+ "loss": 0.4153,
1425
+ "step": 348
1426
+ },
1427
+ {
1428
+ "epoch": 12.0,
1429
+ "eval_accuracy": 0.5909090909090909,
1430
+ "eval_f1_macro": 0.5082184346733783,
1431
+ "eval_f1_micro": 0.5909090909090909,
1432
+ "eval_f1_weighted": 0.5734953480846786,
1433
+ "eval_loss": 1.3527586460113525,
1434
+ "eval_precision_macro": 0.518512557765101,
1435
+ "eval_precision_micro": 0.5909090909090909,
1436
+ "eval_precision_weighted": 0.5819735429220373,
1437
+ "eval_recall_macro": 0.5201889644746788,
1438
+ "eval_recall_micro": 0.5909090909090909,
1439
+ "eval_recall_weighted": 0.5909090909090909,
1440
+ "eval_runtime": 2.1855,
1441
+ "eval_samples_per_second": 60.399,
1442
+ "eval_steps_per_second": 7.779,
1443
+ "step": 348
1444
+ },
1445
+ {
1446
+ "epoch": 12.068965517241379,
1447
+ "grad_norm": 9.67835521697998,
1448
+ "learning_rate": 5.751533742331289e-05,
1449
+ "loss": 0.341,
1450
+ "step": 350
1451
+ },
1452
+ {
1453
+ "epoch": 12.137931034482758,
1454
+ "grad_norm": 26.32645034790039,
1455
+ "learning_rate": 5.720858895705522e-05,
1456
+ "loss": 0.6657,
1457
+ "step": 352
1458
+ },
1459
+ {
1460
+ "epoch": 12.206896551724139,
1461
+ "grad_norm": 26.893024444580078,
1462
+ "learning_rate": 5.6901840490797555e-05,
1463
+ "loss": 0.6414,
1464
+ "step": 354
1465
+ },
1466
+ {
1467
+ "epoch": 12.275862068965518,
1468
+ "grad_norm": 20.325834274291992,
1469
+ "learning_rate": 5.6595092024539874e-05,
1470
+ "loss": 0.5375,
1471
+ "step": 356
1472
+ },
1473
+ {
1474
+ "epoch": 12.344827586206897,
1475
+ "grad_norm": 14.722708702087402,
1476
+ "learning_rate": 5.6288343558282206e-05,
1477
+ "loss": 0.3507,
1478
+ "step": 358
1479
+ },
1480
+ {
1481
+ "epoch": 12.413793103448276,
1482
+ "grad_norm": 16.164493560791016,
1483
+ "learning_rate": 5.598159509202454e-05,
1484
+ "loss": 0.3948,
1485
+ "step": 360
1486
+ },
1487
+ {
1488
+ "epoch": 12.482758620689655,
1489
+ "grad_norm": 20.786996841430664,
1490
+ "learning_rate": 5.567484662576688e-05,
1491
+ "loss": 0.4795,
1492
+ "step": 362
1493
+ },
1494
+ {
1495
+ "epoch": 12.551724137931034,
1496
+ "grad_norm": 14.129579544067383,
1497
+ "learning_rate": 5.536809815950921e-05,
1498
+ "loss": 0.2681,
1499
+ "step": 364
1500
+ },
1501
+ {
1502
+ "epoch": 12.620689655172415,
1503
+ "grad_norm": 14.56933879852295,
1504
+ "learning_rate": 5.506134969325154e-05,
1505
+ "loss": 0.5054,
1506
+ "step": 366
1507
+ },
1508
+ {
1509
+ "epoch": 12.689655172413794,
1510
+ "grad_norm": 30.525728225708008,
1511
+ "learning_rate": 5.475460122699386e-05,
1512
+ "loss": 0.5441,
1513
+ "step": 368
1514
+ },
1515
+ {
1516
+ "epoch": 12.758620689655173,
1517
+ "grad_norm": 26.576383590698242,
1518
+ "learning_rate": 5.4447852760736193e-05,
1519
+ "loss": 0.7958,
1520
+ "step": 370
1521
+ },
1522
+ {
1523
+ "epoch": 12.827586206896552,
1524
+ "grad_norm": 15.745062828063965,
1525
+ "learning_rate": 5.4141104294478526e-05,
1526
+ "loss": 0.4405,
1527
+ "step": 372
1528
+ },
1529
+ {
1530
+ "epoch": 12.89655172413793,
1531
+ "grad_norm": 27.86982536315918,
1532
+ "learning_rate": 5.3834355828220865e-05,
1533
+ "loss": 0.5312,
1534
+ "step": 374
1535
+ },
1536
+ {
1537
+ "epoch": 12.96551724137931,
1538
+ "grad_norm": 14.46611213684082,
1539
+ "learning_rate": 5.35276073619632e-05,
1540
+ "loss": 0.3396,
1541
+ "step": 376
1542
+ },
1543
+ {
1544
+ "epoch": 13.0,
1545
+ "eval_accuracy": 0.5909090909090909,
1546
+ "eval_f1_macro": 0.53715778106022,
1547
+ "eval_f1_micro": 0.5909090909090909,
1548
+ "eval_f1_weighted": 0.5830308957803414,
1549
+ "eval_loss": 1.385578989982605,
1550
+ "eval_precision_macro": 0.5622709610159853,
1551
+ "eval_precision_micro": 0.5909090909090909,
1552
+ "eval_precision_weighted": 0.6017770445498773,
1553
+ "eval_recall_macro": 0.5387226001511716,
1554
+ "eval_recall_micro": 0.5909090909090909,
1555
+ "eval_recall_weighted": 0.5909090909090909,
1556
+ "eval_runtime": 2.1898,
1557
+ "eval_samples_per_second": 60.279,
1558
+ "eval_steps_per_second": 7.763,
1559
+ "step": 377
1560
+ },
1561
+ {
1562
+ "epoch": 13.03448275862069,
1563
+ "grad_norm": 17.2612361907959,
1564
+ "learning_rate": 5.322085889570553e-05,
1565
+ "loss": 0.3936,
1566
+ "step": 378
1567
+ },
1568
+ {
1569
+ "epoch": 13.10344827586207,
1570
+ "grad_norm": 14.51452350616455,
1571
+ "learning_rate": 5.291411042944786e-05,
1572
+ "loss": 0.3816,
1573
+ "step": 380
1574
+ },
1575
+ {
1576
+ "epoch": 13.172413793103448,
1577
+ "grad_norm": 11.573400497436523,
1578
+ "learning_rate": 5.260736196319018e-05,
1579
+ "loss": 0.3908,
1580
+ "step": 382
1581
+ },
1582
+ {
1583
+ "epoch": 13.241379310344827,
1584
+ "grad_norm": 18.94368553161621,
1585
+ "learning_rate": 5.230061349693251e-05,
1586
+ "loss": 0.5848,
1587
+ "step": 384
1588
+ },
1589
+ {
1590
+ "epoch": 13.310344827586206,
1591
+ "grad_norm": 18.253276824951172,
1592
+ "learning_rate": 5.1993865030674845e-05,
1593
+ "loss": 0.2693,
1594
+ "step": 386
1595
+ },
1596
+ {
1597
+ "epoch": 13.379310344827585,
1598
+ "grad_norm": 12.632643699645996,
1599
+ "learning_rate": 5.1687116564417185e-05,
1600
+ "loss": 0.4368,
1601
+ "step": 388
1602
+ },
1603
+ {
1604
+ "epoch": 13.448275862068966,
1605
+ "grad_norm": 14.242535591125488,
1606
+ "learning_rate": 5.138036809815952e-05,
1607
+ "loss": 0.3045,
1608
+ "step": 390
1609
+ },
1610
+ {
1611
+ "epoch": 13.517241379310345,
1612
+ "grad_norm": 10.820467948913574,
1613
+ "learning_rate": 5.107361963190185e-05,
1614
+ "loss": 0.191,
1615
+ "step": 392
1616
+ },
1617
+ {
1618
+ "epoch": 13.586206896551724,
1619
+ "grad_norm": 18.95819091796875,
1620
+ "learning_rate": 5.076687116564417e-05,
1621
+ "loss": 0.5458,
1622
+ "step": 394
1623
+ },
1624
+ {
1625
+ "epoch": 13.655172413793103,
1626
+ "grad_norm": 21.91457748413086,
1627
+ "learning_rate": 5.04601226993865e-05,
1628
+ "loss": 0.7368,
1629
+ "step": 396
1630
+ },
1631
+ {
1632
+ "epoch": 13.724137931034482,
1633
+ "grad_norm": 28.396440505981445,
1634
+ "learning_rate": 5.015337423312883e-05,
1635
+ "loss": 0.6519,
1636
+ "step": 398
1637
+ },
1638
+ {
1639
+ "epoch": 13.793103448275861,
1640
+ "grad_norm": 16.6456356048584,
1641
+ "learning_rate": 4.984662576687117e-05,
1642
+ "loss": 0.5557,
1643
+ "step": 400
1644
+ },
1645
+ {
1646
+ "epoch": 13.862068965517242,
1647
+ "grad_norm": 19.1430606842041,
1648
+ "learning_rate": 4.9539877300613504e-05,
1649
+ "loss": 0.6529,
1650
+ "step": 402
1651
+ },
1652
+ {
1653
+ "epoch": 13.931034482758621,
1654
+ "grad_norm": 26.434980392456055,
1655
+ "learning_rate": 4.923312883435583e-05,
1656
+ "loss": 0.6911,
1657
+ "step": 404
1658
+ },
1659
+ {
1660
+ "epoch": 14.0,
1661
+ "grad_norm": 30.586454391479492,
1662
+ "learning_rate": 4.892638036809816e-05,
1663
+ "loss": 0.5415,
1664
+ "step": 406
1665
+ },
1666
+ {
1667
+ "epoch": 14.0,
1668
+ "eval_accuracy": 0.5909090909090909,
1669
+ "eval_f1_macro": 0.5132147277659137,
1670
+ "eval_f1_micro": 0.5909090909090909,
1671
+ "eval_f1_weighted": 0.5795267633305671,
1672
+ "eval_loss": 1.4251549243927002,
1673
+ "eval_precision_macro": 0.5222798718196696,
1674
+ "eval_precision_micro": 0.5909090909090909,
1675
+ "eval_precision_weighted": 0.5892947366820627,
1676
+ "eval_recall_macro": 0.5254724111866969,
1677
+ "eval_recall_micro": 0.5909090909090909,
1678
+ "eval_recall_weighted": 0.5909090909090909,
1679
+ "eval_runtime": 2.1927,
1680
+ "eval_samples_per_second": 60.199,
1681
+ "eval_steps_per_second": 7.753,
1682
+ "step": 406
1683
+ },
1684
+ {
1685
+ "epoch": 14.068965517241379,
1686
+ "grad_norm": 22.47860336303711,
1687
+ "learning_rate": 4.8619631901840495e-05,
1688
+ "loss": 0.2761,
1689
+ "step": 408
1690
+ },
1691
+ {
1692
+ "epoch": 14.137931034482758,
1693
+ "grad_norm": 6.592859745025635,
1694
+ "learning_rate": 4.831288343558282e-05,
1695
+ "loss": 0.2115,
1696
+ "step": 410
1697
+ },
1698
+ {
1699
+ "epoch": 14.206896551724139,
1700
+ "grad_norm": 11.651180267333984,
1701
+ "learning_rate": 4.800613496932516e-05,
1702
+ "loss": 0.2898,
1703
+ "step": 412
1704
+ },
1705
+ {
1706
+ "epoch": 14.275862068965518,
1707
+ "grad_norm": 24.14730453491211,
1708
+ "learning_rate": 4.769938650306749e-05,
1709
+ "loss": 0.468,
1710
+ "step": 414
1711
+ },
1712
+ {
1713
+ "epoch": 14.344827586206897,
1714
+ "grad_norm": 22.79234504699707,
1715
+ "learning_rate": 4.739263803680982e-05,
1716
+ "loss": 0.3194,
1717
+ "step": 416
1718
+ },
1719
+ {
1720
+ "epoch": 14.413793103448276,
1721
+ "grad_norm": 19.285917282104492,
1722
+ "learning_rate": 4.708588957055215e-05,
1723
+ "loss": 0.3259,
1724
+ "step": 418
1725
+ },
1726
+ {
1727
+ "epoch": 14.482758620689655,
1728
+ "grad_norm": 15.568497657775879,
1729
+ "learning_rate": 4.677914110429448e-05,
1730
+ "loss": 0.229,
1731
+ "step": 420
1732
+ },
1733
+ {
1734
+ "epoch": 14.551724137931034,
1735
+ "grad_norm": 6.664709091186523,
1736
+ "learning_rate": 4.647239263803681e-05,
1737
+ "loss": 0.5116,
1738
+ "step": 422
1739
+ },
1740
+ {
1741
+ "epoch": 14.620689655172415,
1742
+ "grad_norm": 27.09947967529297,
1743
+ "learning_rate": 4.616564417177914e-05,
1744
+ "loss": 0.3526,
1745
+ "step": 424
1746
+ },
1747
+ {
1748
+ "epoch": 14.689655172413794,
1749
+ "grad_norm": 19.279560089111328,
1750
+ "learning_rate": 4.585889570552148e-05,
1751
+ "loss": 0.327,
1752
+ "step": 426
1753
+ },
1754
+ {
1755
+ "epoch": 14.758620689655173,
1756
+ "grad_norm": 14.68875789642334,
1757
+ "learning_rate": 4.5552147239263805e-05,
1758
+ "loss": 0.1932,
1759
+ "step": 428
1760
+ },
1761
+ {
1762
+ "epoch": 14.827586206896552,
1763
+ "grad_norm": 10.188983917236328,
1764
+ "learning_rate": 4.524539877300614e-05,
1765
+ "loss": 0.3491,
1766
+ "step": 430
1767
+ },
1768
+ {
1769
+ "epoch": 14.89655172413793,
1770
+ "grad_norm": 30.82689094543457,
1771
+ "learning_rate": 4.493865030674847e-05,
1772
+ "loss": 0.5371,
1773
+ "step": 432
1774
+ },
1775
+ {
1776
+ "epoch": 14.96551724137931,
1777
+ "grad_norm": 25.854101181030273,
1778
+ "learning_rate": 4.4631901840490795e-05,
1779
+ "loss": 0.4421,
1780
+ "step": 434
1781
+ },
1782
+ {
1783
+ "epoch": 15.0,
1784
+ "eval_accuracy": 0.6136363636363636,
1785
+ "eval_f1_macro": 0.5574127938548423,
1786
+ "eval_f1_micro": 0.6136363636363636,
1787
+ "eval_f1_weighted": 0.6086044486243096,
1788
+ "eval_loss": 1.4080591201782227,
1789
+ "eval_precision_macro": 0.5752992105933282,
1790
+ "eval_precision_micro": 0.6136363636363636,
1791
+ "eval_precision_weighted": 0.6148821098687408,
1792
+ "eval_recall_macro": 0.5531670445956159,
1793
+ "eval_recall_micro": 0.6136363636363636,
1794
+ "eval_recall_weighted": 0.6136363636363636,
1795
+ "eval_runtime": 2.1892,
1796
+ "eval_samples_per_second": 60.296,
1797
+ "eval_steps_per_second": 7.765,
1798
+ "step": 435
1799
+ },
1800
+ {
1801
+ "epoch": 15.03448275862069,
1802
+ "grad_norm": 10.050946235656738,
1803
+ "learning_rate": 4.432515337423313e-05,
1804
+ "loss": 0.3007,
1805
+ "step": 436
1806
+ },
1807
+ {
1808
+ "epoch": 15.10344827586207,
1809
+ "grad_norm": 12.756734848022461,
1810
+ "learning_rate": 4.4018404907975466e-05,
1811
+ "loss": 0.3396,
1812
+ "step": 438
1813
+ },
1814
+ {
1815
+ "epoch": 15.172413793103448,
1816
+ "grad_norm": 25.455589294433594,
1817
+ "learning_rate": 4.371165644171779e-05,
1818
+ "loss": 0.3914,
1819
+ "step": 440
1820
+ },
1821
+ {
1822
+ "epoch": 15.241379310344827,
1823
+ "grad_norm": 13.843463897705078,
1824
+ "learning_rate": 4.3404907975460124e-05,
1825
+ "loss": 0.3308,
1826
+ "step": 442
1827
+ },
1828
+ {
1829
+ "epoch": 15.310344827586206,
1830
+ "grad_norm": 23.339752197265625,
1831
+ "learning_rate": 4.309815950920246e-05,
1832
+ "loss": 0.4584,
1833
+ "step": 444
1834
+ },
1835
+ {
1836
+ "epoch": 15.379310344827585,
1837
+ "grad_norm": 8.64341926574707,
1838
+ "learning_rate": 4.279141104294479e-05,
1839
+ "loss": 0.1732,
1840
+ "step": 446
1841
+ },
1842
+ {
1843
+ "epoch": 15.448275862068966,
1844
+ "grad_norm": 6.727046012878418,
1845
+ "learning_rate": 4.2484662576687115e-05,
1846
+ "loss": 0.2713,
1847
+ "step": 448
1848
+ },
1849
+ {
1850
+ "epoch": 15.517241379310345,
1851
+ "grad_norm": 30.494932174682617,
1852
+ "learning_rate": 4.2177914110429454e-05,
1853
+ "loss": 0.2759,
1854
+ "step": 450
1855
+ },
1856
+ {
1857
+ "epoch": 15.586206896551724,
1858
+ "grad_norm": 25.142616271972656,
1859
+ "learning_rate": 4.1871165644171786e-05,
1860
+ "loss": 0.2707,
1861
+ "step": 452
1862
+ },
1863
+ {
1864
+ "epoch": 15.655172413793103,
1865
+ "grad_norm": 27.547733306884766,
1866
+ "learning_rate": 4.156441717791411e-05,
1867
+ "loss": 0.5069,
1868
+ "step": 454
1869
+ },
1870
+ {
1871
+ "epoch": 15.724137931034482,
1872
+ "grad_norm": 19.634178161621094,
1873
+ "learning_rate": 4.1257668711656444e-05,
1874
+ "loss": 0.3872,
1875
+ "step": 456
1876
+ },
1877
+ {
1878
+ "epoch": 15.793103448275861,
1879
+ "grad_norm": 21.880495071411133,
1880
+ "learning_rate": 4.0950920245398776e-05,
1881
+ "loss": 0.2277,
1882
+ "step": 458
1883
+ },
1884
+ {
1885
+ "epoch": 15.862068965517242,
1886
+ "grad_norm": 15.454160690307617,
1887
+ "learning_rate": 4.06441717791411e-05,
1888
+ "loss": 0.3267,
1889
+ "step": 460
1890
+ },
1891
+ {
1892
+ "epoch": 15.931034482758621,
1893
+ "grad_norm": 15.292703628540039,
1894
+ "learning_rate": 4.033742331288344e-05,
1895
+ "loss": 0.2948,
1896
+ "step": 462
1897
+ },
1898
+ {
1899
+ "epoch": 16.0,
1900
+ "grad_norm": 24.81329917907715,
1901
+ "learning_rate": 4.0030674846625773e-05,
1902
+ "loss": 0.2893,
1903
+ "step": 464
1904
+ },
1905
+ {
1906
+ "epoch": 16.0,
1907
+ "eval_accuracy": 0.5984848484848485,
1908
+ "eval_f1_macro": 0.512701663933191,
1909
+ "eval_f1_micro": 0.5984848484848485,
1910
+ "eval_f1_weighted": 0.5832852686300961,
1911
+ "eval_loss": 1.5284953117370605,
1912
+ "eval_precision_macro": 0.5059085452362763,
1913
+ "eval_precision_micro": 0.5984848484848485,
1914
+ "eval_precision_weighted": 0.5752329251259732,
1915
+ "eval_recall_macro": 0.5253136810279667,
1916
+ "eval_recall_micro": 0.5984848484848485,
1917
+ "eval_recall_weighted": 0.5984848484848485,
1918
+ "eval_runtime": 2.1964,
1919
+ "eval_samples_per_second": 60.099,
1920
+ "eval_steps_per_second": 7.74,
1921
+ "step": 464
1922
+ },
1923
+ {
1924
+ "epoch": 16.06896551724138,
1925
+ "grad_norm": 19.42568588256836,
1926
+ "learning_rate": 3.97239263803681e-05,
1927
+ "loss": 0.1854,
1928
+ "step": 466
1929
+ },
1930
+ {
1931
+ "epoch": 16.137931034482758,
1932
+ "grad_norm": 12.055990219116211,
1933
+ "learning_rate": 3.941717791411043e-05,
1934
+ "loss": 0.2725,
1935
+ "step": 468
1936
+ },
1937
+ {
1938
+ "epoch": 16.20689655172414,
1939
+ "grad_norm": 22.306148529052734,
1940
+ "learning_rate": 3.9110429447852764e-05,
1941
+ "loss": 0.2066,
1942
+ "step": 470
1943
+ },
1944
+ {
1945
+ "epoch": 16.275862068965516,
1946
+ "grad_norm": 15.890237808227539,
1947
+ "learning_rate": 3.880368098159509e-05,
1948
+ "loss": 0.3127,
1949
+ "step": 472
1950
+ },
1951
+ {
1952
+ "epoch": 16.344827586206897,
1953
+ "grad_norm": 17.045835494995117,
1954
+ "learning_rate": 3.849693251533742e-05,
1955
+ "loss": 0.3555,
1956
+ "step": 474
1957
+ },
1958
+ {
1959
+ "epoch": 16.413793103448278,
1960
+ "grad_norm": 15.841018676757812,
1961
+ "learning_rate": 3.819018404907976e-05,
1962
+ "loss": 0.3553,
1963
+ "step": 476
1964
+ },
1965
+ {
1966
+ "epoch": 16.482758620689655,
1967
+ "grad_norm": 11.002803802490234,
1968
+ "learning_rate": 3.7883435582822086e-05,
1969
+ "loss": 0.3044,
1970
+ "step": 478
1971
+ },
1972
+ {
1973
+ "epoch": 16.551724137931036,
1974
+ "grad_norm": 8.885010719299316,
1975
+ "learning_rate": 3.757668711656442e-05,
1976
+ "loss": 0.2396,
1977
+ "step": 480
1978
+ },
1979
+ {
1980
+ "epoch": 16.620689655172413,
1981
+ "grad_norm": 20.58298110961914,
1982
+ "learning_rate": 3.726993865030675e-05,
1983
+ "loss": 0.2576,
1984
+ "step": 482
1985
+ },
1986
+ {
1987
+ "epoch": 16.689655172413794,
1988
+ "grad_norm": 18.69637107849121,
1989
+ "learning_rate": 3.696319018404908e-05,
1990
+ "loss": 0.3052,
1991
+ "step": 484
1992
+ },
1993
+ {
1994
+ "epoch": 16.75862068965517,
1995
+ "grad_norm": 7.023503303527832,
1996
+ "learning_rate": 3.665644171779141e-05,
1997
+ "loss": 0.1449,
1998
+ "step": 486
1999
+ },
2000
+ {
2001
+ "epoch": 16.82758620689655,
2002
+ "grad_norm": 18.077198028564453,
2003
+ "learning_rate": 3.634969325153375e-05,
2004
+ "loss": 0.3661,
2005
+ "step": 488
2006
+ },
2007
+ {
2008
+ "epoch": 16.896551724137932,
2009
+ "grad_norm": 17.02280616760254,
2010
+ "learning_rate": 3.6042944785276074e-05,
2011
+ "loss": 0.2259,
2012
+ "step": 490
2013
+ },
2014
+ {
2015
+ "epoch": 16.96551724137931,
2016
+ "grad_norm": 20.76211929321289,
2017
+ "learning_rate": 3.5736196319018406e-05,
2018
+ "loss": 0.2403,
2019
+ "step": 492
2020
+ },
2021
+ {
2022
+ "epoch": 17.0,
2023
+ "eval_accuracy": 0.6287878787878788,
2024
+ "eval_f1_macro": 0.5395048301452242,
2025
+ "eval_f1_micro": 0.6287878787878788,
2026
+ "eval_f1_weighted": 0.6065423604608557,
2027
+ "eval_loss": 1.4820140600204468,
2028
+ "eval_precision_macro": 0.58078231292517,
2029
+ "eval_precision_micro": 0.6287878787878788,
2030
+ "eval_precision_weighted": 0.6380321067821069,
2031
+ "eval_recall_macro": 0.5459561602418744,
2032
+ "eval_recall_micro": 0.6287878787878788,
2033
+ "eval_recall_weighted": 0.6287878787878788,
2034
+ "eval_runtime": 2.1863,
2035
+ "eval_samples_per_second": 60.376,
2036
+ "eval_steps_per_second": 7.776,
2037
+ "step": 493
2038
+ },
2039
+ {
2040
+ "epoch": 17.03448275862069,
2041
+ "grad_norm": 20.38401985168457,
2042
+ "learning_rate": 3.542944785276074e-05,
2043
+ "loss": 0.3407,
2044
+ "step": 494
2045
+ },
2046
+ {
2047
+ "epoch": 17.103448275862068,
2048
+ "grad_norm": 5.897765636444092,
2049
+ "learning_rate": 3.512269938650307e-05,
2050
+ "loss": 0.1841,
2051
+ "step": 496
2052
+ },
2053
+ {
2054
+ "epoch": 17.17241379310345,
2055
+ "grad_norm": 15.455453872680664,
2056
+ "learning_rate": 3.4815950920245396e-05,
2057
+ "loss": 0.1269,
2058
+ "step": 498
2059
+ },
2060
+ {
2061
+ "epoch": 17.24137931034483,
2062
+ "grad_norm": 27.898658752441406,
2063
+ "learning_rate": 3.4509202453987735e-05,
2064
+ "loss": 0.2314,
2065
+ "step": 500
2066
+ },
2067
+ {
2068
+ "epoch": 17.310344827586206,
2069
+ "grad_norm": 19.497142791748047,
2070
+ "learning_rate": 3.420245398773007e-05,
2071
+ "loss": 0.2743,
2072
+ "step": 502
2073
+ },
2074
+ {
2075
+ "epoch": 17.379310344827587,
2076
+ "grad_norm": 15.271653175354004,
2077
+ "learning_rate": 3.3895705521472393e-05,
2078
+ "loss": 0.1961,
2079
+ "step": 504
2080
+ },
2081
+ {
2082
+ "epoch": 17.448275862068964,
2083
+ "grad_norm": 25.726585388183594,
2084
+ "learning_rate": 3.3588957055214726e-05,
2085
+ "loss": 0.3415,
2086
+ "step": 506
2087
+ },
2088
+ {
2089
+ "epoch": 17.517241379310345,
2090
+ "grad_norm": 22.30027198791504,
2091
+ "learning_rate": 3.328220858895706e-05,
2092
+ "loss": 0.2365,
2093
+ "step": 508
2094
+ },
2095
+ {
2096
+ "epoch": 17.586206896551722,
2097
+ "grad_norm": 27.83454132080078,
2098
+ "learning_rate": 3.2975460122699384e-05,
2099
+ "loss": 0.323,
2100
+ "step": 510
2101
+ },
2102
+ {
2103
+ "epoch": 17.655172413793103,
2104
+ "grad_norm": 17.994998931884766,
2105
+ "learning_rate": 3.266871165644172e-05,
2106
+ "loss": 0.2189,
2107
+ "step": 512
2108
+ },
2109
+ {
2110
+ "epoch": 17.724137931034484,
2111
+ "grad_norm": 15.599747657775879,
2112
+ "learning_rate": 3.2361963190184055e-05,
2113
+ "loss": 0.2235,
2114
+ "step": 514
2115
+ },
2116
+ {
2117
+ "epoch": 17.79310344827586,
2118
+ "grad_norm": 19.07125473022461,
2119
+ "learning_rate": 3.205521472392638e-05,
2120
+ "loss": 0.2625,
2121
+ "step": 516
2122
+ },
2123
+ {
2124
+ "epoch": 17.862068965517242,
2125
+ "grad_norm": 5.794015407562256,
2126
+ "learning_rate": 3.174846625766871e-05,
2127
+ "loss": 0.1962,
2128
+ "step": 518
2129
+ },
2130
+ {
2131
+ "epoch": 17.93103448275862,
2132
+ "grad_norm": 27.05267906188965,
2133
+ "learning_rate": 3.1441717791411045e-05,
2134
+ "loss": 0.4204,
2135
+ "step": 520
2136
+ },
2137
+ {
2138
+ "epoch": 18.0,
2139
+ "grad_norm": 12.915875434875488,
2140
+ "learning_rate": 3.113496932515337e-05,
2141
+ "loss": 0.1087,
2142
+ "step": 522
2143
+ },
2144
+ {
2145
+ "epoch": 18.0,
2146
+ "eval_accuracy": 0.6060606060606061,
2147
+ "eval_f1_macro": 0.5319546411035773,
2148
+ "eval_f1_micro": 0.6060606060606061,
2149
+ "eval_f1_weighted": 0.6009282162872589,
2150
+ "eval_loss": 1.3999419212341309,
2151
+ "eval_precision_macro": 0.5611877440448869,
2152
+ "eval_precision_micro": 0.6060606060606061,
2153
+ "eval_precision_weighted": 0.6210855415400871,
2154
+ "eval_recall_macro": 0.5260619803476947,
2155
+ "eval_recall_micro": 0.6060606060606061,
2156
+ "eval_recall_weighted": 0.6060606060606061,
2157
+ "eval_runtime": 2.1908,
2158
+ "eval_samples_per_second": 60.251,
2159
+ "eval_steps_per_second": 7.76,
2160
+ "step": 522
2161
+ },
2162
+ {
2163
+ "epoch": 18.06896551724138,
2164
+ "grad_norm": 17.82192039489746,
2165
+ "learning_rate": 3.0828220858895703e-05,
2166
+ "loss": 0.1255,
2167
+ "step": 524
2168
+ },
2169
+ {
2170
+ "epoch": 18.137931034482758,
2171
+ "grad_norm": 13.459417343139648,
2172
+ "learning_rate": 3.052147239263804e-05,
2173
+ "loss": 0.1728,
2174
+ "step": 526
2175
+ },
2176
+ {
2177
+ "epoch": 18.20689655172414,
2178
+ "grad_norm": 19.81383514404297,
2179
+ "learning_rate": 3.0214723926380368e-05,
2180
+ "loss": 0.1743,
2181
+ "step": 528
2182
+ },
2183
+ {
2184
+ "epoch": 18.275862068965516,
2185
+ "grad_norm": 17.316072463989258,
2186
+ "learning_rate": 2.99079754601227e-05,
2187
+ "loss": 0.2115,
2188
+ "step": 530
2189
+ },
2190
+ {
2191
+ "epoch": 18.344827586206897,
2192
+ "grad_norm": 25.32339096069336,
2193
+ "learning_rate": 2.9601226993865033e-05,
2194
+ "loss": 0.387,
2195
+ "step": 532
2196
+ },
2197
+ {
2198
+ "epoch": 18.413793103448278,
2199
+ "grad_norm": 10.883082389831543,
2200
+ "learning_rate": 2.9294478527607362e-05,
2201
+ "loss": 0.0874,
2202
+ "step": 534
2203
+ },
2204
+ {
2205
+ "epoch": 18.482758620689655,
2206
+ "grad_norm": 25.40140151977539,
2207
+ "learning_rate": 2.8987730061349694e-05,
2208
+ "loss": 0.3103,
2209
+ "step": 536
2210
+ },
2211
+ {
2212
+ "epoch": 18.551724137931036,
2213
+ "grad_norm": 13.151557922363281,
2214
+ "learning_rate": 2.8680981595092026e-05,
2215
+ "loss": 0.277,
2216
+ "step": 538
2217
+ },
2218
+ {
2219
+ "epoch": 18.620689655172413,
2220
+ "grad_norm": 16.688093185424805,
2221
+ "learning_rate": 2.837423312883436e-05,
2222
+ "loss": 0.1661,
2223
+ "step": 540
2224
+ },
2225
+ {
2226
+ "epoch": 18.689655172413794,
2227
+ "grad_norm": 3.376065492630005,
2228
+ "learning_rate": 2.8067484662576688e-05,
2229
+ "loss": 0.2142,
2230
+ "step": 542
2231
+ },
2232
+ {
2233
+ "epoch": 18.75862068965517,
2234
+ "grad_norm": 20.72919464111328,
2235
+ "learning_rate": 2.776073619631902e-05,
2236
+ "loss": 0.2779,
2237
+ "step": 544
2238
+ },
2239
+ {
2240
+ "epoch": 18.82758620689655,
2241
+ "grad_norm": 3.795419454574585,
2242
+ "learning_rate": 2.7453987730061353e-05,
2243
+ "loss": 0.0706,
2244
+ "step": 546
2245
+ },
2246
+ {
2247
+ "epoch": 18.896551724137932,
2248
+ "grad_norm": 18.60194969177246,
2249
+ "learning_rate": 2.714723926380368e-05,
2250
+ "loss": 0.0951,
2251
+ "step": 548
2252
+ },
2253
+ {
2254
+ "epoch": 18.96551724137931,
2255
+ "grad_norm": 17.101329803466797,
2256
+ "learning_rate": 2.6840490797546014e-05,
2257
+ "loss": 0.2619,
2258
+ "step": 550
2259
+ },
2260
+ {
2261
+ "epoch": 19.0,
2262
+ "eval_accuracy": 0.6136363636363636,
2263
+ "eval_f1_macro": 0.561813814539783,
2264
+ "eval_f1_micro": 0.6136363636363636,
2265
+ "eval_f1_weighted": 0.6037404914018973,
2266
+ "eval_loss": 1.440819263458252,
2267
+ "eval_precision_macro": 0.6154006028203717,
2268
+ "eval_precision_micro": 0.6136363636363636,
2269
+ "eval_precision_weighted": 0.6224909931745618,
2270
+ "eval_recall_macro": 0.5501284958427816,
2271
+ "eval_recall_micro": 0.6136363636363636,
2272
+ "eval_recall_weighted": 0.6136363636363636,
2273
+ "eval_runtime": 2.201,
2274
+ "eval_samples_per_second": 59.973,
2275
+ "eval_steps_per_second": 7.724,
2276
+ "step": 551
2277
+ },
2278
+ {
2279
+ "epoch": 19.03448275862069,
2280
+ "grad_norm": 13.738136291503906,
2281
+ "learning_rate": 2.6533742331288346e-05,
2282
+ "loss": 0.1927,
2283
+ "step": 552
2284
+ },
2285
+ {
2286
+ "epoch": 19.103448275862068,
2287
+ "grad_norm": 17.96263885498047,
2288
+ "learning_rate": 2.6226993865030675e-05,
2289
+ "loss": 0.1388,
2290
+ "step": 554
2291
+ },
2292
+ {
2293
+ "epoch": 19.17241379310345,
2294
+ "grad_norm": 25.091278076171875,
2295
+ "learning_rate": 2.5920245398773008e-05,
2296
+ "loss": 0.1476,
2297
+ "step": 556
2298
+ },
2299
+ {
2300
+ "epoch": 19.24137931034483,
2301
+ "grad_norm": 14.9843168258667,
2302
+ "learning_rate": 2.561349693251534e-05,
2303
+ "loss": 0.116,
2304
+ "step": 558
2305
+ },
2306
+ {
2307
+ "epoch": 19.310344827586206,
2308
+ "grad_norm": 13.588825225830078,
2309
+ "learning_rate": 2.530674846625767e-05,
2310
+ "loss": 0.1187,
2311
+ "step": 560
2312
+ },
2313
+ {
2314
+ "epoch": 19.379310344827587,
2315
+ "grad_norm": 8.29517650604248,
2316
+ "learning_rate": 2.5e-05,
2317
+ "loss": 0.1494,
2318
+ "step": 562
2319
+ },
2320
+ {
2321
+ "epoch": 19.448275862068964,
2322
+ "grad_norm": 24.074113845825195,
2323
+ "learning_rate": 2.469325153374233e-05,
2324
+ "loss": 0.1549,
2325
+ "step": 564
2326
+ },
2327
+ {
2328
+ "epoch": 19.517241379310345,
2329
+ "grad_norm": 7.5761213302612305,
2330
+ "learning_rate": 2.4386503067484666e-05,
2331
+ "loss": 0.0937,
2332
+ "step": 566
2333
+ },
2334
+ {
2335
+ "epoch": 19.586206896551722,
2336
+ "grad_norm": 9.566593170166016,
2337
+ "learning_rate": 2.4079754601226995e-05,
2338
+ "loss": 0.1075,
2339
+ "step": 568
2340
+ },
2341
+ {
2342
+ "epoch": 19.655172413793103,
2343
+ "grad_norm": 14.346840858459473,
2344
+ "learning_rate": 2.3773006134969324e-05,
2345
+ "loss": 0.112,
2346
+ "step": 570
2347
+ },
2348
+ {
2349
+ "epoch": 19.724137931034484,
2350
+ "grad_norm": 22.044532775878906,
2351
+ "learning_rate": 2.346625766871166e-05,
2352
+ "loss": 0.1831,
2353
+ "step": 572
2354
+ },
2355
+ {
2356
+ "epoch": 19.79310344827586,
2357
+ "grad_norm": 10.464526176452637,
2358
+ "learning_rate": 2.315950920245399e-05,
2359
+ "loss": 0.1353,
2360
+ "step": 574
2361
+ },
2362
+ {
2363
+ "epoch": 19.862068965517242,
2364
+ "grad_norm": 6.597527980804443,
2365
+ "learning_rate": 2.285276073619632e-05,
2366
+ "loss": 0.073,
2367
+ "step": 576
2368
+ },
2369
+ {
2370
+ "epoch": 19.93103448275862,
2371
+ "grad_norm": 3.7595765590667725,
2372
+ "learning_rate": 2.2546012269938653e-05,
2373
+ "loss": 0.0686,
2374
+ "step": 578
2375
+ },
2376
+ {
2377
+ "epoch": 20.0,
2378
+ "grad_norm": 4.094130516052246,
2379
+ "learning_rate": 2.2239263803680982e-05,
2380
+ "loss": 0.1154,
2381
+ "step": 580
2382
+ },
2383
+ {
2384
+ "epoch": 20.0,
2385
+ "eval_accuracy": 0.6287878787878788,
2386
+ "eval_f1_macro": 0.5401653994931305,
2387
+ "eval_f1_micro": 0.6287878787878788,
2388
+ "eval_f1_weighted": 0.6089890501655209,
2389
+ "eval_loss": 1.4516006708145142,
2390
+ "eval_precision_macro": 0.5538492063492063,
2391
+ "eval_precision_micro": 0.6287878787878788,
2392
+ "eval_precision_weighted": 0.6144570707070707,
2393
+ "eval_recall_macro": 0.5491912320483749,
2394
+ "eval_recall_micro": 0.6287878787878788,
2395
+ "eval_recall_weighted": 0.6287878787878788,
2396
+ "eval_runtime": 2.2073,
2397
+ "eval_samples_per_second": 59.802,
2398
+ "eval_steps_per_second": 7.702,
2399
+ "step": 580
2400
+ },
2401
+ {
2402
+ "epoch": 20.06896551724138,
2403
+ "grad_norm": 9.36839485168457,
2404
+ "learning_rate": 2.1932515337423315e-05,
2405
+ "loss": 0.0687,
2406
+ "step": 582
2407
+ },
2408
+ {
2409
+ "epoch": 20.137931034482758,
2410
+ "grad_norm": 24.775922775268555,
2411
+ "learning_rate": 2.1625766871165647e-05,
2412
+ "loss": 0.1334,
2413
+ "step": 584
2414
+ },
2415
+ {
2416
+ "epoch": 20.20689655172414,
2417
+ "grad_norm": 23.269336700439453,
2418
+ "learning_rate": 2.1319018404907976e-05,
2419
+ "loss": 0.1813,
2420
+ "step": 586
2421
+ },
2422
+ {
2423
+ "epoch": 20.275862068965516,
2424
+ "grad_norm": 5.708396911621094,
2425
+ "learning_rate": 2.1012269938650308e-05,
2426
+ "loss": 0.0841,
2427
+ "step": 588
2428
+ },
2429
+ {
2430
+ "epoch": 20.344827586206897,
2431
+ "grad_norm": 11.590498924255371,
2432
+ "learning_rate": 2.0705521472392637e-05,
2433
+ "loss": 0.116,
2434
+ "step": 590
2435
+ },
2436
+ {
2437
+ "epoch": 20.413793103448278,
2438
+ "grad_norm": 18.878385543823242,
2439
+ "learning_rate": 2.039877300613497e-05,
2440
+ "loss": 0.1187,
2441
+ "step": 592
2442
+ },
2443
+ {
2444
+ "epoch": 20.482758620689655,
2445
+ "grad_norm": 15.631240844726562,
2446
+ "learning_rate": 2.0092024539877302e-05,
2447
+ "loss": 0.1785,
2448
+ "step": 594
2449
+ },
2450
+ {
2451
+ "epoch": 20.551724137931036,
2452
+ "grad_norm": 9.58936595916748,
2453
+ "learning_rate": 1.978527607361963e-05,
2454
+ "loss": 0.0336,
2455
+ "step": 596
2456
+ },
2457
+ {
2458
+ "epoch": 20.620689655172413,
2459
+ "grad_norm": 15.450642585754395,
2460
+ "learning_rate": 1.9478527607361967e-05,
2461
+ "loss": 0.1695,
2462
+ "step": 598
2463
+ },
2464
+ {
2465
+ "epoch": 20.689655172413794,
2466
+ "grad_norm": 4.292616844177246,
2467
+ "learning_rate": 1.9171779141104296e-05,
2468
+ "loss": 0.0633,
2469
+ "step": 600
2470
+ },
2471
+ {
2472
+ "epoch": 20.75862068965517,
2473
+ "grad_norm": 4.748676776885986,
2474
+ "learning_rate": 1.8865030674846625e-05,
2475
+ "loss": 0.0889,
2476
+ "step": 602
2477
+ },
2478
+ {
2479
+ "epoch": 20.82758620689655,
2480
+ "grad_norm": 16.53461265563965,
2481
+ "learning_rate": 1.855828220858896e-05,
2482
+ "loss": 0.1608,
2483
+ "step": 604
2484
+ },
2485
+ {
2486
+ "epoch": 20.896551724137932,
2487
+ "grad_norm": 26.134490966796875,
2488
+ "learning_rate": 1.825153374233129e-05,
2489
+ "loss": 0.1693,
2490
+ "step": 606
2491
+ },
2492
+ {
2493
+ "epoch": 20.96551724137931,
2494
+ "grad_norm": 20.084346771240234,
2495
+ "learning_rate": 1.7944785276073618e-05,
2496
+ "loss": 0.1367,
2497
+ "step": 608
2498
+ },
2499
+ {
2500
+ "epoch": 21.0,
2501
+ "eval_accuracy": 0.6136363636363636,
2502
+ "eval_f1_macro": 0.5254125588472093,
2503
+ "eval_f1_micro": 0.6136363636363636,
2504
+ "eval_f1_weighted": 0.5942474496633104,
2505
+ "eval_loss": 1.530592441558838,
2506
+ "eval_precision_macro": 0.5321410615528263,
2507
+ "eval_precision_micro": 0.6136363636363636,
2508
+ "eval_precision_weighted": 0.5922574157868276,
2509
+ "eval_recall_macro": 0.5339984882842026,
2510
+ "eval_recall_micro": 0.6136363636363636,
2511
+ "eval_recall_weighted": 0.6136363636363636,
2512
+ "eval_runtime": 2.2038,
2513
+ "eval_samples_per_second": 59.896,
2514
+ "eval_steps_per_second": 7.714,
2515
+ "step": 609
2516
+ },
2517
+ {
2518
+ "epoch": 21.03448275862069,
2519
+ "grad_norm": 4.850620746612549,
2520
+ "learning_rate": 1.7638036809815954e-05,
2521
+ "loss": 0.0614,
2522
+ "step": 610
2523
+ },
2524
+ {
2525
+ "epoch": 21.103448275862068,
2526
+ "grad_norm": 23.208776473999023,
2527
+ "learning_rate": 1.7331288343558283e-05,
2528
+ "loss": 0.1307,
2529
+ "step": 612
2530
+ },
2531
+ {
2532
+ "epoch": 21.17241379310345,
2533
+ "grad_norm": 8.065655708312988,
2534
+ "learning_rate": 1.7024539877300612e-05,
2535
+ "loss": 0.0389,
2536
+ "step": 614
2537
+ },
2538
+ {
2539
+ "epoch": 21.24137931034483,
2540
+ "grad_norm": 9.520572662353516,
2541
+ "learning_rate": 1.6717791411042948e-05,
2542
+ "loss": 0.1722,
2543
+ "step": 616
2544
+ },
2545
+ {
2546
+ "epoch": 21.310344827586206,
2547
+ "grad_norm": 26.25343894958496,
2548
+ "learning_rate": 1.6411042944785277e-05,
2549
+ "loss": 0.2112,
2550
+ "step": 618
2551
+ },
2552
+ {
2553
+ "epoch": 21.379310344827587,
2554
+ "grad_norm": 6.016479015350342,
2555
+ "learning_rate": 1.6104294478527606e-05,
2556
+ "loss": 0.1577,
2557
+ "step": 620
2558
+ },
2559
+ {
2560
+ "epoch": 21.448275862068964,
2561
+ "grad_norm": 28.95755386352539,
2562
+ "learning_rate": 1.579754601226994e-05,
2563
+ "loss": 0.1363,
2564
+ "step": 622
2565
+ },
2566
+ {
2567
+ "epoch": 21.517241379310345,
2568
+ "grad_norm": 5.472126483917236,
2569
+ "learning_rate": 1.549079754601227e-05,
2570
+ "loss": 0.0527,
2571
+ "step": 624
2572
+ },
2573
+ {
2574
+ "epoch": 21.586206896551722,
2575
+ "grad_norm": 12.595699310302734,
2576
+ "learning_rate": 1.5184049079754603e-05,
2577
+ "loss": 0.1044,
2578
+ "step": 626
2579
+ },
2580
+ {
2581
+ "epoch": 21.655172413793103,
2582
+ "grad_norm": 13.575519561767578,
2583
+ "learning_rate": 1.4877300613496933e-05,
2584
+ "loss": 0.1545,
2585
+ "step": 628
2586
+ },
2587
+ {
2588
+ "epoch": 21.724137931034484,
2589
+ "grad_norm": 1.717926025390625,
2590
+ "learning_rate": 1.4570552147239264e-05,
2591
+ "loss": 0.0754,
2592
+ "step": 630
2593
+ },
2594
+ {
2595
+ "epoch": 21.79310344827586,
2596
+ "grad_norm": 15.80093765258789,
2597
+ "learning_rate": 1.4263803680981596e-05,
2598
+ "loss": 0.1765,
2599
+ "step": 632
2600
+ },
2601
+ {
2602
+ "epoch": 21.862068965517242,
2603
+ "grad_norm": 8.399004936218262,
2604
+ "learning_rate": 1.3957055214723927e-05,
2605
+ "loss": 0.0478,
2606
+ "step": 634
2607
+ },
2608
+ {
2609
+ "epoch": 21.93103448275862,
2610
+ "grad_norm": 9.28877067565918,
2611
+ "learning_rate": 1.3650306748466258e-05,
2612
+ "loss": 0.0561,
2613
+ "step": 636
2614
+ },
2615
+ {
2616
+ "epoch": 22.0,
2617
+ "grad_norm": 13.288439750671387,
2618
+ "learning_rate": 1.334355828220859e-05,
2619
+ "loss": 0.0839,
2620
+ "step": 638
2621
+ },
2622
+ {
2623
+ "epoch": 22.0,
2624
+ "eval_accuracy": 0.5833333333333334,
2625
+ "eval_f1_macro": 0.515415984334813,
2626
+ "eval_f1_micro": 0.5833333333333334,
2627
+ "eval_f1_weighted": 0.575588621142538,
2628
+ "eval_loss": 1.6396534442901611,
2629
+ "eval_precision_macro": 0.5274346580737558,
2630
+ "eval_precision_micro": 0.5833333333333334,
2631
+ "eval_precision_weighted": 0.5895078605604921,
2632
+ "eval_recall_macro": 0.5252003023431595,
2633
+ "eval_recall_micro": 0.5833333333333334,
2634
+ "eval_recall_weighted": 0.5833333333333334,
2635
+ "eval_runtime": 2.1936,
2636
+ "eval_samples_per_second": 60.176,
2637
+ "eval_steps_per_second": 7.75,
2638
+ "step": 638
2639
+ },
2640
+ {
2641
+ "epoch": 22.06896551724138,
2642
+ "grad_norm": 5.867281436920166,
2643
+ "learning_rate": 1.303680981595092e-05,
2644
+ "loss": 0.0999,
2645
+ "step": 640
2646
+ },
2647
+ {
2648
+ "epoch": 22.137931034482758,
2649
+ "grad_norm": 3.7638766765594482,
2650
+ "learning_rate": 1.2730061349693251e-05,
2651
+ "loss": 0.0418,
2652
+ "step": 642
2653
+ },
2654
+ {
2655
+ "epoch": 22.20689655172414,
2656
+ "grad_norm": 1.9534434080123901,
2657
+ "learning_rate": 1.2423312883435584e-05,
2658
+ "loss": 0.0351,
2659
+ "step": 644
2660
+ },
2661
+ {
2662
+ "epoch": 22.275862068965516,
2663
+ "grad_norm": 1.0892353057861328,
2664
+ "learning_rate": 1.2116564417177914e-05,
2665
+ "loss": 0.0272,
2666
+ "step": 646
2667
+ },
2668
+ {
2669
+ "epoch": 22.344827586206897,
2670
+ "grad_norm": 16.817415237426758,
2671
+ "learning_rate": 1.1809815950920245e-05,
2672
+ "loss": 0.2253,
2673
+ "step": 648
2674
+ },
2675
+ {
2676
+ "epoch": 22.413793103448278,
2677
+ "grad_norm": 24.870695114135742,
2678
+ "learning_rate": 1.1503067484662577e-05,
2679
+ "loss": 0.1316,
2680
+ "step": 650
2681
+ },
2682
+ {
2683
+ "epoch": 22.482758620689655,
2684
+ "grad_norm": 22.584014892578125,
2685
+ "learning_rate": 1.119631901840491e-05,
2686
+ "loss": 0.2264,
2687
+ "step": 652
2688
+ },
2689
+ {
2690
+ "epoch": 22.551724137931036,
2691
+ "grad_norm": 8.62193775177002,
2692
+ "learning_rate": 1.0889570552147239e-05,
2693
+ "loss": 0.0336,
2694
+ "step": 654
2695
+ },
2696
+ {
2697
+ "epoch": 22.620689655172413,
2698
+ "grad_norm": 7.243905067443848,
2699
+ "learning_rate": 1.0582822085889571e-05,
2700
+ "loss": 0.0435,
2701
+ "step": 656
2702
+ },
2703
+ {
2704
+ "epoch": 22.689655172413794,
2705
+ "grad_norm": 1.8948745727539062,
2706
+ "learning_rate": 1.0276073619631903e-05,
2707
+ "loss": 0.0655,
2708
+ "step": 658
2709
+ },
2710
+ {
2711
+ "epoch": 22.75862068965517,
2712
+ "grad_norm": 5.095564842224121,
2713
+ "learning_rate": 9.969325153374232e-06,
2714
+ "loss": 0.042,
2715
+ "step": 660
2716
+ },
2717
+ {
2718
+ "epoch": 22.82758620689655,
2719
+ "grad_norm": 25.0085506439209,
2720
+ "learning_rate": 9.662576687116565e-06,
2721
+ "loss": 0.1186,
2722
+ "step": 662
2723
+ },
2724
+ {
2725
+ "epoch": 22.896551724137932,
2726
+ "grad_norm": 4.847318172454834,
2727
+ "learning_rate": 9.355828220858897e-06,
2728
+ "loss": 0.0377,
2729
+ "step": 664
2730
+ },
2731
+ {
2732
+ "epoch": 22.96551724137931,
2733
+ "grad_norm": 39.400447845458984,
2734
+ "learning_rate": 9.049079754601228e-06,
2735
+ "loss": 0.1818,
2736
+ "step": 666
2737
+ },
2738
+ {
2739
+ "epoch": 23.0,
2740
+ "eval_accuracy": 0.6515151515151515,
2741
+ "eval_f1_macro": 0.565634487061558,
2742
+ "eval_f1_micro": 0.6515151515151515,
2743
+ "eval_f1_weighted": 0.6358752918681777,
2744
+ "eval_loss": 1.641618251800537,
2745
+ "eval_precision_macro": 0.584759718380408,
2746
+ "eval_precision_micro": 0.6515151515151515,
2747
+ "eval_precision_weighted": 0.6455531040170539,
2748
+ "eval_recall_macro": 0.5695616024187452,
2749
+ "eval_recall_micro": 0.6515151515151515,
2750
+ "eval_recall_weighted": 0.6515151515151515,
2751
+ "eval_runtime": 2.2051,
2752
+ "eval_samples_per_second": 59.861,
2753
+ "eval_steps_per_second": 7.709,
2754
+ "step": 667
2755
+ },
2756
+ {
2757
+ "epoch": 23.03448275862069,
2758
+ "grad_norm": 11.35802936553955,
2759
+ "learning_rate": 8.742331288343558e-06,
2760
+ "loss": 0.0513,
2761
+ "step": 668
2762
+ },
2763
+ {
2764
+ "epoch": 23.103448275862068,
2765
+ "grad_norm": 1.9400774240493774,
2766
+ "learning_rate": 8.435582822085889e-06,
2767
+ "loss": 0.0571,
2768
+ "step": 670
2769
+ },
2770
+ {
2771
+ "epoch": 23.17241379310345,
2772
+ "grad_norm": 3.928626775741577,
2773
+ "learning_rate": 8.128834355828221e-06,
2774
+ "loss": 0.034,
2775
+ "step": 672
2776
+ },
2777
+ {
2778
+ "epoch": 23.24137931034483,
2779
+ "grad_norm": 5.0317511558532715,
2780
+ "learning_rate": 7.822085889570554e-06,
2781
+ "loss": 0.0833,
2782
+ "step": 674
2783
+ },
2784
+ {
2785
+ "epoch": 23.310344827586206,
2786
+ "grad_norm": 12.943672180175781,
2787
+ "learning_rate": 7.5153374233128836e-06,
2788
+ "loss": 0.0575,
2789
+ "step": 676
2790
+ },
2791
+ {
2792
+ "epoch": 23.379310344827587,
2793
+ "grad_norm": 1.5223954916000366,
2794
+ "learning_rate": 7.208588957055215e-06,
2795
+ "loss": 0.0237,
2796
+ "step": 678
2797
+ },
2798
+ {
2799
+ "epoch": 23.448275862068964,
2800
+ "grad_norm": 21.462011337280273,
2801
+ "learning_rate": 6.901840490797547e-06,
2802
+ "loss": 0.0785,
2803
+ "step": 680
2804
+ },
2805
+ {
2806
+ "epoch": 23.517241379310345,
2807
+ "grad_norm": 13.98965072631836,
2808
+ "learning_rate": 6.595092024539877e-06,
2809
+ "loss": 0.0597,
2810
+ "step": 682
2811
+ },
2812
+ {
2813
+ "epoch": 23.586206896551722,
2814
+ "grad_norm": 13.07774829864502,
2815
+ "learning_rate": 6.288343558282209e-06,
2816
+ "loss": 0.0498,
2817
+ "step": 684
2818
+ },
2819
+ {
2820
+ "epoch": 23.655172413793103,
2821
+ "grad_norm": 2.752511501312256,
2822
+ "learning_rate": 5.98159509202454e-06,
2823
+ "loss": 0.028,
2824
+ "step": 686
2825
+ },
2826
+ {
2827
+ "epoch": 23.724137931034484,
2828
+ "grad_norm": 3.4312055110931396,
2829
+ "learning_rate": 5.674846625766871e-06,
2830
+ "loss": 0.0531,
2831
+ "step": 688
2832
+ },
2833
+ {
2834
+ "epoch": 23.79310344827586,
2835
+ "grad_norm": 20.920682907104492,
2836
+ "learning_rate": 5.368098159509203e-06,
2837
+ "loss": 0.1892,
2838
+ "step": 690
2839
+ },
2840
+ {
2841
+ "epoch": 23.862068965517242,
2842
+ "grad_norm": 1.7730119228363037,
2843
+ "learning_rate": 5.061349693251534e-06,
2844
+ "loss": 0.0409,
2845
+ "step": 692
2846
+ },
2847
+ {
2848
+ "epoch": 23.93103448275862,
2849
+ "grad_norm": 8.248014450073242,
2850
+ "learning_rate": 4.7546012269938654e-06,
2851
+ "loss": 0.0499,
2852
+ "step": 694
2853
+ },
2854
+ {
2855
+ "epoch": 24.0,
2856
+ "grad_norm": 13.629621505737305,
2857
+ "learning_rate": 4.447852760736196e-06,
2858
+ "loss": 0.0781,
2859
+ "step": 696
2860
+ },
2861
+ {
2862
+ "epoch": 24.0,
2863
+ "eval_accuracy": 0.6212121212121212,
2864
+ "eval_f1_macro": 0.5392658545631691,
2865
+ "eval_f1_micro": 0.6212121212121212,
2866
+ "eval_f1_weighted": 0.607896474480917,
2867
+ "eval_loss": 1.6025735139846802,
2868
+ "eval_precision_macro": 0.5523980652552082,
2869
+ "eval_precision_micro": 0.6212121212121212,
2870
+ "eval_precision_weighted": 0.6118077193077193,
2871
+ "eval_recall_macro": 0.5412169312169313,
2872
+ "eval_recall_micro": 0.6212121212121212,
2873
+ "eval_recall_weighted": 0.6212121212121212,
2874
+ "eval_runtime": 2.1909,
2875
+ "eval_samples_per_second": 60.25,
2876
+ "eval_steps_per_second": 7.76,
2877
+ "step": 696
2878
+ },
2879
+ {
2880
+ "epoch": 24.06896551724138,
2881
+ "grad_norm": 2.504154920578003,
2882
+ "learning_rate": 4.141104294478528e-06,
2883
+ "loss": 0.0335,
2884
+ "step": 698
2885
+ },
2886
+ {
2887
+ "epoch": 24.137931034482758,
2888
+ "grad_norm": 17.774227142333984,
2889
+ "learning_rate": 3.834355828220859e-06,
2890
+ "loss": 0.1496,
2891
+ "step": 700
2892
+ },
2893
+ {
2894
+ "epoch": 24.20689655172414,
2895
+ "grad_norm": 2.7488608360290527,
2896
+ "learning_rate": 3.52760736196319e-06,
2897
+ "loss": 0.0266,
2898
+ "step": 702
2899
+ },
2900
+ {
2901
+ "epoch": 24.275862068965516,
2902
+ "grad_norm": 7.259423732757568,
2903
+ "learning_rate": 3.2208588957055217e-06,
2904
+ "loss": 0.0863,
2905
+ "step": 704
2906
+ },
2907
+ {
2908
+ "epoch": 24.344827586206897,
2909
+ "grad_norm": 7.789644718170166,
2910
+ "learning_rate": 2.914110429447853e-06,
2911
+ "loss": 0.0491,
2912
+ "step": 706
2913
+ },
2914
+ {
2915
+ "epoch": 24.413793103448278,
2916
+ "grad_norm": 1.2346043586730957,
2917
+ "learning_rate": 2.607361963190184e-06,
2918
+ "loss": 0.0136,
2919
+ "step": 708
2920
+ },
2921
+ {
2922
+ "epoch": 24.482758620689655,
2923
+ "grad_norm": 5.063798904418945,
2924
+ "learning_rate": 2.3006134969325154e-06,
2925
+ "loss": 0.0189,
2926
+ "step": 710
2927
+ },
2928
+ {
2929
+ "epoch": 24.551724137931036,
2930
+ "grad_norm": 6.230537414550781,
2931
+ "learning_rate": 1.9938650306748465e-06,
2932
+ "loss": 0.0431,
2933
+ "step": 712
2934
+ },
2935
+ {
2936
+ "epoch": 24.620689655172413,
2937
+ "grad_norm": 15.741507530212402,
2938
+ "learning_rate": 1.687116564417178e-06,
2939
+ "loss": 0.19,
2940
+ "step": 714
2941
+ },
2942
+ {
2943
+ "epoch": 24.689655172413794,
2944
+ "grad_norm": 12.17914867401123,
2945
+ "learning_rate": 1.3803680981595093e-06,
2946
+ "loss": 0.0662,
2947
+ "step": 716
2948
+ },
2949
+ {
2950
+ "epoch": 24.75862068965517,
2951
+ "grad_norm": 1.5245462656021118,
2952
+ "learning_rate": 1.0736196319018406e-06,
2953
+ "loss": 0.0579,
2954
+ "step": 718
2955
+ },
2956
+ {
2957
+ "epoch": 24.82758620689655,
2958
+ "grad_norm": 3.734255790710449,
2959
+ "learning_rate": 7.668711656441718e-07,
2960
+ "loss": 0.0337,
2961
+ "step": 720
2962
+ },
2963
+ {
2964
+ "epoch": 24.896551724137932,
2965
+ "grad_norm": 25.646230697631836,
2966
+ "learning_rate": 4.601226993865031e-07,
2967
+ "loss": 0.1464,
2968
+ "step": 722
2969
+ },
2970
+ {
2971
+ "epoch": 24.96551724137931,
2972
+ "grad_norm": 11.875574111938477,
2973
+ "learning_rate": 1.5337423312883438e-07,
2974
+ "loss": 0.0792,
2975
+ "step": 724
2976
+ },
2977
+ {
2978
+ "epoch": 25.0,
2979
+ "eval_accuracy": 0.6287878787878788,
2980
+ "eval_f1_macro": 0.5494495726426264,
2981
+ "eval_f1_micro": 0.6287878787878788,
2982
+ "eval_f1_weighted": 0.6179503958679277,
2983
+ "eval_loss": 1.599715232849121,
2984
+ "eval_precision_macro": 0.5716202716202715,
2985
+ "eval_precision_micro": 0.6287878787878788,
2986
+ "eval_precision_weighted": 0.6297404683768321,
2987
+ "eval_recall_macro": 0.5480196523053665,
2988
+ "eval_recall_micro": 0.6287878787878788,
2989
+ "eval_recall_weighted": 0.6287878787878788,
2990
+ "eval_runtime": 2.2024,
2991
+ "eval_samples_per_second": 59.933,
2992
+ "eval_steps_per_second": 7.719,
2993
+ "step": 725
2994
+ },
2995
+ {
2996
+ "epoch": 25.0,
2997
+ "step": 725,
2998
+ "total_flos": 5.76425379898368e+16,
2999
+ "train_loss": 0.7207291752639515,
3000
+ "train_runtime": 641.8354,
3001
+ "train_samples_per_second": 17.995,
3002
+ "train_steps_per_second": 1.13
3003
+ }
3004
+ ],
3005
+ "logging_steps": 2,
3006
+ "max_steps": 725,
3007
+ "num_input_tokens_seen": 0,
3008
+ "num_train_epochs": 25,
3009
+ "save_steps": 500,
3010
+ "stateful_callbacks": {
3011
+ "TrainerControl": {
3012
+ "args": {
3013
+ "should_epoch_stop": false,
3014
+ "should_evaluate": false,
3015
+ "should_log": false,
3016
+ "should_save": true,
3017
+ "should_training_stop": true
3018
+ },
3019
+ "attributes": {}
3020
+ }
3021
+ },
3022
+ "total_flos": 5.76425379898368e+16,
3023
+ "train_batch_size": 8,
3024
+ "trial_name": null,
3025
+ "trial_params": null
3026
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ea3ac6f4af0e603510351eba5316c284c9cc93f20a595cc4c02b335d601cabf
3
+ size 5368