{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 100.0,
"eval_steps": 500,
"global_step": 38700,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 0.4617389738559723,
"learning_rate": 9.900516795865633e-06,
"loss": 2.3944,
"step": 387
},
{
"epoch": 2.0,
"grad_norm": 1.3490279912948608,
"learning_rate": 9.800775193798451e-06,
"loss": 2.3064,
"step": 774
},
{
"epoch": 3.0,
"grad_norm": 1.569773554801941,
"learning_rate": 9.70077519379845e-06,
"loss": 2.2698,
"step": 1161
},
{
"epoch": 4.0,
"grad_norm": 1.8885618448257446,
"learning_rate": 9.600775193798451e-06,
"loss": 2.2307,
"step": 1548
},
{
"epoch": 5.0,
"grad_norm": 4.19589900970459,
"learning_rate": 9.50077519379845e-06,
"loss": 2.1967,
"step": 1935
},
{
"epoch": 6.0,
"grad_norm": 2.8683922290802,
"learning_rate": 9.40077519379845e-06,
"loss": 2.1706,
"step": 2322
},
{
"epoch": 7.0,
"grad_norm": 2.789121389389038,
"learning_rate": 9.301033591731267e-06,
"loss": 2.1468,
"step": 2709
},
{
"epoch": 8.0,
"grad_norm": 2.4587924480438232,
"learning_rate": 9.201033591731266e-06,
"loss": 2.1146,
"step": 3096
},
{
"epoch": 9.0,
"grad_norm": 3.5958080291748047,
"learning_rate": 9.101033591731266e-06,
"loss": 2.1352,
"step": 3483
},
{
"epoch": 10.0,
"grad_norm": 3.489946126937866,
"learning_rate": 9.001033591731267e-06,
"loss": 2.0821,
"step": 3870
},
{
"epoch": 11.0,
"grad_norm": 4.598156929016113,
"learning_rate": 8.901033591731268e-06,
"loss": 2.0615,
"step": 4257
},
{
"epoch": 12.0,
"grad_norm": 3.008528709411621,
"learning_rate": 8.801291989664084e-06,
"loss": 2.0678,
"step": 4644
},
{
"epoch": 13.0,
"grad_norm": 3.5459799766540527,
"learning_rate": 8.701291989664083e-06,
"loss": 2.0456,
"step": 5031
},
{
"epoch": 14.0,
"grad_norm": 3.375678300857544,
"learning_rate": 8.601291989664084e-06,
"loss": 2.0267,
"step": 5418
},
{
"epoch": 15.0,
"grad_norm": 3.702049970626831,
"learning_rate": 8.501291989664083e-06,
"loss": 2.0277,
"step": 5805
},
{
"epoch": 16.0,
"grad_norm": 3.2726526260375977,
"learning_rate": 8.401291989664084e-06,
"loss": 2.0316,
"step": 6192
},
{
"epoch": 17.0,
"grad_norm": 4.813005447387695,
"learning_rate": 8.301291989664083e-06,
"loss": 2.0194,
"step": 6579
},
{
"epoch": 18.0,
"grad_norm": 4.172696590423584,
"learning_rate": 8.201550387596899e-06,
"loss": 1.9955,
"step": 6966
},
{
"epoch": 19.0,
"grad_norm": 4.579292297363281,
"learning_rate": 8.1015503875969e-06,
"loss": 2.0042,
"step": 7353
},
{
"epoch": 20.0,
"grad_norm": 2.4596266746520996,
"learning_rate": 8.001550387596901e-06,
"loss": 1.9876,
"step": 7740
},
{
"epoch": 21.0,
"grad_norm": 3.477017641067505,
"learning_rate": 7.9015503875969e-06,
"loss": 1.9997,
"step": 8127
},
{
"epoch": 22.0,
"grad_norm": 3.163322925567627,
"learning_rate": 7.8015503875969e-06,
"loss": 1.9954,
"step": 8514
},
{
"epoch": 23.0,
"grad_norm": 3.398844003677368,
"learning_rate": 7.701808785529717e-06,
"loss": 1.9795,
"step": 8901
},
{
"epoch": 24.0,
"grad_norm": 2.4849860668182373,
"learning_rate": 7.601808785529716e-06,
"loss": 1.9721,
"step": 9288
},
{
"epoch": 25.0,
"grad_norm": 3.936396598815918,
"learning_rate": 7.5018087855297165e-06,
"loss": 1.9711,
"step": 9675
},
{
"epoch": 26.0,
"grad_norm": 3.5928125381469727,
"learning_rate": 7.401808785529716e-06,
"loss": 1.9699,
"step": 10062
},
{
"epoch": 27.0,
"grad_norm": 4.320117473602295,
"learning_rate": 7.301808785529716e-06,
"loss": 1.9653,
"step": 10449
},
{
"epoch": 28.0,
"grad_norm": 3.197213649749756,
"learning_rate": 7.2020671834625335e-06,
"loss": 1.9485,
"step": 10836
},
{
"epoch": 29.0,
"grad_norm": 5.386129379272461,
"learning_rate": 7.102067183462534e-06,
"loss": 1.9496,
"step": 11223
},
{
"epoch": 30.0,
"grad_norm": 3.7523598670959473,
"learning_rate": 7.002067183462533e-06,
"loss": 1.9362,
"step": 11610
},
{
"epoch": 31.0,
"grad_norm": 2.684497356414795,
"learning_rate": 6.902067183462533e-06,
"loss": 1.9541,
"step": 11997
},
{
"epoch": 32.0,
"grad_norm": 3.1632211208343506,
"learning_rate": 6.802067183462533e-06,
"loss": 1.9393,
"step": 12384
},
{
"epoch": 33.0,
"grad_norm": 3.8801021575927734,
"learning_rate": 6.702067183462533e-06,
"loss": 1.9434,
"step": 12771
},
{
"epoch": 34.0,
"grad_norm": 5.696396350860596,
"learning_rate": 6.602325581395349e-06,
"loss": 1.9381,
"step": 13158
},
{
"epoch": 35.0,
"grad_norm": 4.289793968200684,
"learning_rate": 6.502325581395349e-06,
"loss": 1.907,
"step": 13545
},
{
"epoch": 36.0,
"grad_norm": 2.529639720916748,
"learning_rate": 6.402325581395349e-06,
"loss": 1.9195,
"step": 13932
},
{
"epoch": 37.0,
"grad_norm": 3.81178879737854,
"learning_rate": 6.30232558139535e-06,
"loss": 1.9165,
"step": 14319
},
{
"epoch": 38.0,
"grad_norm": 2.9904661178588867,
"learning_rate": 6.2023255813953495e-06,
"loss": 1.9256,
"step": 14706
},
{
"epoch": 39.0,
"grad_norm": 3.6421236991882324,
"learning_rate": 6.10232558139535e-06,
"loss": 1.9113,
"step": 15093
},
{
"epoch": 40.0,
"grad_norm": 2.2317612171173096,
"learning_rate": 6.002583979328166e-06,
"loss": 1.9097,
"step": 15480
},
{
"epoch": 41.0,
"grad_norm": 3.1146578788757324,
"learning_rate": 5.902583979328166e-06,
"loss": 1.9052,
"step": 15867
},
{
"epoch": 42.0,
"grad_norm": 2.7279844284057617,
"learning_rate": 5.802583979328166e-06,
"loss": 1.8954,
"step": 16254
},
{
"epoch": 43.0,
"grad_norm": 4.926011085510254,
"learning_rate": 5.702583979328166e-06,
"loss": 1.8875,
"step": 16641
},
{
"epoch": 44.0,
"grad_norm": 3.347382068634033,
"learning_rate": 5.602583979328166e-06,
"loss": 1.8773,
"step": 17028
},
{
"epoch": 45.0,
"grad_norm": 4.180445671081543,
"learning_rate": 5.502583979328165e-06,
"loss": 1.9004,
"step": 17415
},
{
"epoch": 46.0,
"grad_norm": 4.547725677490234,
"learning_rate": 5.402842377260983e-06,
"loss": 1.8963,
"step": 17802
},
{
"epoch": 47.0,
"grad_norm": 3.7642316818237305,
"learning_rate": 5.302842377260983e-06,
"loss": 1.8963,
"step": 18189
},
{
"epoch": 48.0,
"grad_norm": 3.2858059406280518,
"learning_rate": 5.202842377260983e-06,
"loss": 1.8892,
"step": 18576
},
{
"epoch": 49.0,
"grad_norm": 2.402951240539551,
"learning_rate": 5.102842377260982e-06,
"loss": 1.8924,
"step": 18963
},
{
"epoch": 50.0,
"grad_norm": 2.7793326377868652,
"learning_rate": 5.002842377260982e-06,
"loss": 1.893,
"step": 19350
},
{
"epoch": 51.0,
"grad_norm": 3.6147751808166504,
"learning_rate": 4.9028423772609825e-06,
"loss": 1.8781,
"step": 19737
},
{
"epoch": 52.0,
"grad_norm": 4.644423484802246,
"learning_rate": 4.803100775193798e-06,
"loss": 1.8664,
"step": 20124
},
{
"epoch": 53.0,
"grad_norm": 3.9448044300079346,
"learning_rate": 4.703100775193799e-06,
"loss": 1.8898,
"step": 20511
},
{
"epoch": 54.0,
"grad_norm": 3.2738654613494873,
"learning_rate": 4.6031007751937986e-06,
"loss": 1.876,
"step": 20898
},
{
"epoch": 55.0,
"grad_norm": 2.4791011810302734,
"learning_rate": 4.503100775193799e-06,
"loss": 1.864,
"step": 21285
},
{
"epoch": 56.0,
"grad_norm": 2.873185873031616,
"learning_rate": 4.403100775193799e-06,
"loss": 1.8607,
"step": 21672
},
{
"epoch": 57.0,
"grad_norm": 3.4380970001220703,
"learning_rate": 4.303100775193799e-06,
"loss": 1.8788,
"step": 22059
},
{
"epoch": 58.0,
"grad_norm": 6.063451766967773,
"learning_rate": 4.203100775193798e-06,
"loss": 1.8555,
"step": 22446
},
{
"epoch": 59.0,
"grad_norm": 3.49165415763855,
"learning_rate": 4.103100775193799e-06,
"loss": 1.8728,
"step": 22833
},
{
"epoch": 60.0,
"grad_norm": 3.0042760372161865,
"learning_rate": 4.003100775193799e-06,
"loss": 1.8626,
"step": 23220
},
{
"epoch": 61.0,
"grad_norm": 5.144619464874268,
"learning_rate": 3.9031007751937985e-06,
"loss": 1.8748,
"step": 23607
},
{
"epoch": 62.0,
"grad_norm": 3.293827533721924,
"learning_rate": 3.8031007751937986e-06,
"loss": 1.8672,
"step": 23994
},
{
"epoch": 63.0,
"grad_norm": 3.602724313735962,
"learning_rate": 3.7033591731266153e-06,
"loss": 1.8595,
"step": 24381
},
{
"epoch": 64.0,
"grad_norm": 4.562984466552734,
"learning_rate": 3.6033591731266154e-06,
"loss": 1.8689,
"step": 24768
},
{
"epoch": 65.0,
"grad_norm": 1.9634078741073608,
"learning_rate": 3.5036175710594318e-06,
"loss": 1.8655,
"step": 25155
},
{
"epoch": 66.0,
"grad_norm": 2.359875440597534,
"learning_rate": 3.4036175710594314e-06,
"loss": 1.853,
"step": 25542
},
{
"epoch": 67.0,
"grad_norm": 2.7939252853393555,
"learning_rate": 3.303617571059432e-06,
"loss": 1.8539,
"step": 25929
},
{
"epoch": 68.0,
"grad_norm": 3.8576886653900146,
"learning_rate": 3.203617571059432e-06,
"loss": 1.8579,
"step": 26316
},
{
"epoch": 69.0,
"grad_norm": 3.667564630508423,
"learning_rate": 3.1036175710594318e-06,
"loss": 1.8474,
"step": 26703
},
{
"epoch": 70.0,
"grad_norm": 2.811600923538208,
"learning_rate": 3.003875968992248e-06,
"loss": 1.8442,
"step": 27090
},
{
"epoch": 71.0,
"grad_norm": 4.078073978424072,
"learning_rate": 2.9038759689922486e-06,
"loss": 1.8308,
"step": 27477
},
{
"epoch": 72.0,
"grad_norm": 2.925967216491699,
"learning_rate": 2.8038759689922483e-06,
"loss": 1.852,
"step": 27864
},
{
"epoch": 73.0,
"grad_norm": 3.747209072113037,
"learning_rate": 2.7038759689922484e-06,
"loss": 1.8525,
"step": 28251
},
{
"epoch": 74.0,
"grad_norm": 3.608250141143799,
"learning_rate": 2.603875968992248e-06,
"loss": 1.8525,
"step": 28638
},
{
"epoch": 75.0,
"grad_norm": 3.8558218479156494,
"learning_rate": 2.503875968992248e-06,
"loss": 1.854,
"step": 29025
},
{
"epoch": 76.0,
"grad_norm": 5.439558982849121,
"learning_rate": 2.4041343669250645e-06,
"loss": 1.8398,
"step": 29412
},
{
"epoch": 77.0,
"grad_norm": 3.1695291996002197,
"learning_rate": 2.304134366925065e-06,
"loss": 1.8546,
"step": 29799
},
{
"epoch": 78.0,
"grad_norm": 5.20058012008667,
"learning_rate": 2.2041343669250647e-06,
"loss": 1.8436,
"step": 30186
},
{
"epoch": 79.0,
"grad_norm": 3.13725209236145,
"learning_rate": 2.104134366925065e-06,
"loss": 1.8588,
"step": 30573
},
{
"epoch": 80.0,
"grad_norm": 3.2893519401550293,
"learning_rate": 2.004134366925065e-06,
"loss": 1.8483,
"step": 30960
},
{
"epoch": 81.0,
"grad_norm": 3.6456849575042725,
"learning_rate": 1.9041343669250649e-06,
"loss": 1.8335,
"step": 31347
},
{
"epoch": 82.0,
"grad_norm": 2.9292547702789307,
"learning_rate": 1.8041343669250648e-06,
"loss": 1.8392,
"step": 31734
},
{
"epoch": 83.0,
"grad_norm": 1.7664443254470825,
"learning_rate": 1.7041343669250649e-06,
"loss": 1.8599,
"step": 32121
},
{
"epoch": 84.0,
"grad_norm": 2.2330174446105957,
"learning_rate": 1.6041343669250648e-06,
"loss": 1.8354,
"step": 32508
},
{
"epoch": 85.0,
"grad_norm": 2.521230936050415,
"learning_rate": 1.5043927648578813e-06,
"loss": 1.8449,
"step": 32895
},
{
"epoch": 86.0,
"grad_norm": 3.481978178024292,
"learning_rate": 1.4043927648578812e-06,
"loss": 1.8249,
"step": 33282
},
{
"epoch": 87.0,
"grad_norm": 2.547130823135376,
"learning_rate": 1.3043927648578813e-06,
"loss": 1.8433,
"step": 33669
},
{
"epoch": 88.0,
"grad_norm": 3.2912960052490234,
"learning_rate": 1.2043927648578812e-06,
"loss": 1.8207,
"step": 34056
},
{
"epoch": 89.0,
"grad_norm": 5.078946113586426,
"learning_rate": 1.104392764857881e-06,
"loss": 1.8373,
"step": 34443
},
{
"epoch": 90.0,
"grad_norm": 3.1195156574249268,
"learning_rate": 1.0043927648578812e-06,
"loss": 1.8433,
"step": 34830
},
{
"epoch": 91.0,
"grad_norm": 3.4918460845947266,
"learning_rate": 9.043927648578812e-07,
"loss": 1.8358,
"step": 35217
},
{
"epoch": 92.0,
"grad_norm": 3.529872179031372,
"learning_rate": 8.043927648578811e-07,
"loss": 1.8395,
"step": 35604
},
{
"epoch": 93.0,
"grad_norm": 1.9922144412994385,
"learning_rate": 7.043927648578812e-07,
"loss": 1.8397,
"step": 35991
},
{
"epoch": 94.0,
"grad_norm": 2.5432543754577637,
"learning_rate": 6.043927648578812e-07,
"loss": 1.8323,
"step": 36378
},
{
"epoch": 95.0,
"grad_norm": 2.306816816329956,
"learning_rate": 5.043927648578812e-07,
"loss": 1.85,
"step": 36765
},
{
"epoch": 96.0,
"grad_norm": 3.3451128005981445,
"learning_rate": 4.0465116279069773e-07,
"loss": 1.8461,
"step": 37152
},
{
"epoch": 97.0,
"grad_norm": 2.5839929580688477,
"learning_rate": 3.046511627906977e-07,
"loss": 1.8522,
"step": 37539
},
{
"epoch": 98.0,
"grad_norm": 3.5228729248046875,
"learning_rate": 2.0465116279069766e-07,
"loss": 1.8449,
"step": 37926
},
{
"epoch": 99.0,
"grad_norm": 2.9826176166534424,
"learning_rate": 1.0465116279069768e-07,
"loss": 1.8333,
"step": 38313
},
{
"epoch": 100.0,
"grad_norm": 3.2512013912200928,
"learning_rate": 4.651162790697675e-09,
"loss": 1.8459,
"step": 38700
},
{
"epoch": 100.0,
"step": 38700,
"total_flos": 2.0309886965211648e+18,
"train_loss": 1.927858536594598,
"train_runtime": 42079.8744,
"train_samples_per_second": 183.867,
"train_steps_per_second": 0.92
}
],
"logging_steps": 387,
"max_steps": 38700,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 6800,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.0309886965211648e+18,
"train_batch_size": 50,
"trial_name": null,
"trial_params": null
}